xref: /openbmc/linux/arch/mips/cavium-octeon/setup.c (revision 24b1944f)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 2004-2007 Cavium Networks
7  * Copyright (C) 2008, 2009 Wind River Systems
8  *   written by Ralf Baechle <ralf@linux-mips.org>
9  */
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/console.h>
13 #include <linux/delay.h>
14 #include <linux/export.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/serial.h>
18 #include <linux/smp.h>
19 #include <linux/types.h>
20 #include <linux/string.h>	/* for memset */
21 #include <linux/tty.h>
22 #include <linux/time.h>
23 #include <linux/platform_device.h>
24 #include <linux/serial_core.h>
25 #include <linux/serial_8250.h>
26 #include <linux/of_fdt.h>
27 #include <linux/libfdt.h>
28 #include <linux/kexec.h>
29 
30 #include <asm/processor.h>
31 #include <asm/reboot.h>
32 #include <asm/smp-ops.h>
33 #include <asm/irq_cpu.h>
34 #include <asm/mipsregs.h>
35 #include <asm/bootinfo.h>
36 #include <asm/sections.h>
37 #include <asm/time.h>
38 
39 #include <asm/octeon/octeon.h>
40 #include <asm/octeon/pci-octeon.h>
41 #include <asm/octeon/cvmx-mio-defs.h>
42 
43 extern struct plat_smp_ops octeon_smp_ops;
44 
45 #ifdef CONFIG_PCI
46 extern void pci_console_init(const char *arg);
47 #endif
48 
49 static unsigned long long MAX_MEMORY = 512ull << 20;
50 
51 struct octeon_boot_descriptor *octeon_boot_desc_ptr;
52 
53 struct cvmx_bootinfo *octeon_bootinfo;
54 EXPORT_SYMBOL(octeon_bootinfo);
55 
56 static unsigned long long RESERVE_LOW_MEM = 0ull;
57 #ifdef CONFIG_KEXEC
58 #ifdef CONFIG_SMP
59 /*
60  * Wait for relocation code is prepared and send
61  * secondary CPUs to spin until kernel is relocated.
62  */
static void octeon_kexec_smp_down(void *ignored)
{
	int cpu = smp_processor_id();

	/* Take this CPU offline with interrupts masked so it stops
	 * participating in scheduling and IPIs, then wait for the
	 * boot CPU to signal that the relocation code is ready. */
	local_irq_disable();
	set_cpu_online(cpu, false);
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	/* Drain the write buffer and synchronize the instruction
	 * stream before jumping into the relocated spin loop. */
	asm volatile (
	"	sync						\n"
	"	synci	($0)					\n");

	relocated_kexec_smp_wait(NULL);
}
78 #endif
79 
/*
 * Physical DDR region layout used when re-creating the bootmem free
 * list for kexec: DDR0 and DDR1 are 0x010000000-byte (256MB) windows,
 * DDR2 covers the remainder up to OCTEON_MAX_PHY_MEM_SIZE.
 */
#define OCTEON_DDR0_BASE    (0x0ULL)
#define OCTEON_DDR0_SIZE    (0x010000000ULL)
#define OCTEON_DDR1_BASE    (0x410000000ULL)
#define OCTEON_DDR1_SIZE    (0x010000000ULL)
#define OCTEON_DDR2_BASE    (0x020000000ULL)
#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
/* Hard cap on the memory size handled by kexec_bootmem_init(). */
#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)

/* Image saved by octeon_kexec_prepare() for use at shutdown time. */
static struct kimage *kimage_ptr;
89 
90 static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
91 {
92 	int64_t addr;
93 	struct cvmx_bootmem_desc *bootmem_desc;
94 
95 	bootmem_desc = cvmx_bootmem_get_desc();
96 
97 	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
98 		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
99 		pr_err("Error: requested memory too large,"
100 		       "truncating to maximum size\n");
101 	}
102 
103 	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
104 	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
105 
106 	addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
107 	bootmem_desc->head_addr = 0;
108 
109 	if (mem_size <= OCTEON_DDR0_SIZE) {
110 		__cvmx_bootmem_phy_free(addr,
111 				mem_size - RESERVE_LOW_MEM -
112 				low_reserved_bytes, 0);
113 		return;
114 	}
115 
116 	__cvmx_bootmem_phy_free(addr,
117 			OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
118 			low_reserved_bytes, 0);
119 
120 	mem_size -= OCTEON_DDR0_SIZE;
121 
122 	if (mem_size > OCTEON_DDR1_SIZE) {
123 		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
124 		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
125 				mem_size - OCTEON_DDR1_SIZE, 0);
126 	} else
127 		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
128 }
129 
/*
 * Prepare a kexec image: if a segment starts with the literal string
 * "kexec", split that command line in place (bootloader style) into a
 * NUL-separated argv array recorded in the boot descriptor, and stash
 * the image pointer for octeon_generic_shutdown().
 *
 * Always returns 0 (never rejects an image).
 */
static int octeon_kexec_prepare(struct kimage *image)
{
	int i;
	char *bootloader = "kexec";

	octeon_boot_desc_ptr->argc = 0;
	for (i = 0; i < image->nr_segments; i++) {
		if (!strncmp(bootloader, (char *)image->segment[i].buf,
				strlen(bootloader))) {
			/*
			 * convert command line string to array
			 * of parameters (as bootloader does).
			 */
			int argc = 0, offt;
			char *str = (char *)image->segment[i].buf;
			char *ptr = strchr(str, ' ');
			/* Replace each separator with NUL; record the
			 * target-side address (segment mem + offset) of
			 * every non-empty token that follows. */
			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
				*ptr = '\0';
				if (ptr[1] != ' ') {
					offt = (int)(ptr - str + 1);
					octeon_boot_desc_ptr->argv[argc] =
						image->segment[i].mem + offt;
					argc++;
				}
				ptr = strchr(ptr + 1, ' ');
			}
			octeon_boot_desc_ptr->argc = argc;
			break;
		}
	}

	/*
	 * Information about segments will be needed during pre-boot memory
	 * initialization.
	 */
	kimage_ptr = image;
	return 0;
}
168 
/*
 * Common shutdown work for both normal kexec and crash kexec:
 * disable watchdogs, rebuild or trim the bootmem state, and fill in
 * the argument registers the new kernel will receive.
 */
static void octeon_generic_shutdown(void)
{
	int i;
#ifdef CONFIG_SMP
	int cpu;
#endif
	struct cvmx_bootmem_desc *bootmem_desc;
	void *named_block_array_ptr;

	bootmem_desc = cvmx_bootmem_get_desc();
	named_block_array_ptr =
		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);

#ifdef CONFIG_SMP
	/* disable watchdogs */
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
	/* Normal kexec: wipe the named blocks and rebuild the free
	 * list from scratch.  Crash kexec (else branch) must leave
	 * memory contents intact for the dump. */
	if (kimage_ptr != kexec_crash_image) {
		memset(named_block_array_ptr,
			0x0,
			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
			sizeof(struct cvmx_bootmem_named_block_desc));
		/*
		 * Mark all memory (except low 0x100000 bytes) as free.
		 * It is the same thing that bootloader does.
		 */
		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
				0x100000);
		/*
		 * Allocate all segments to avoid their corruption during boot.
		 */
		for (i = 0; i < kimage_ptr->nr_segments; i++)
			cvmx_bootmem_alloc_address(
				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
				kimage_ptr->segment[i].mem - PAGE_SIZE,
				PAGE_SIZE);
	} else {
		/*
		 * Do not mark all memory as free. Free only named sections
		 * leaving the rest of memory unchanged.
		 */
		struct cvmx_bootmem_named_block_desc *ptr =
			(struct cvmx_bootmem_named_block_desc *)
			named_block_array_ptr;

		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
			if (ptr[i].size)
				cvmx_bootmem_free_named(ptr[i].name);
	}
	/* a2/a3 for the new kernel's entry point. */
	kexec_args[2] = 1UL; /* running on octeon_main_processor */
	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#ifdef CONFIG_SMP
	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#endif
}
228 
/*
 * Machine shutdown hook for a normal kexec: do the common shutdown
 * work, then park every secondary CPU and wait until this CPU is the
 * only one left online.
 */
static void octeon_shutdown(void)
{
	octeon_generic_shutdown();
#ifdef CONFIG_SMP
	smp_call_function(octeon_kexec_smp_down, NULL, 0);
	smp_wmb();
	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}
241 
/*
 * Machine shutdown hook for crash kexec: common Octeon shutdown work
 * followed by the generic MIPS crash shutdown (register/CPU capture).
 */
static void octeon_crash_shutdown(struct pt_regs *regs)
{
	octeon_generic_shutdown();
	default_machine_crash_shutdown(regs);
}
247 
248 #endif /* CONFIG_KEXEC */
249 
250 #ifdef CONFIG_CAVIUM_RESERVE32
251 uint64_t octeon_reserve32_memory;
252 EXPORT_SYMBOL(octeon_reserve32_memory);
253 #endif
254 
255 #ifdef CONFIG_KEXEC
256 /* crashkernel cmdline parameter is parsed _after_ memory setup
257  * we also parse it here (workaround for EHB5200) */
258 static uint64_t crashk_size, crashk_base;
259 #endif
260 
261 static int octeon_uart;
262 
263 extern asmlinkage void handle_int(void);
264 extern asmlinkage void plat_irq_dispatch(void);
265 
266 /**
267  * Return non zero if we are currently running in the Octeon simulator
268  *
269  * Returns
270  */
271 int octeon_is_simulation(void)
272 {
273 	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
274 }
275 EXPORT_SYMBOL(octeon_is_simulation);
276 
277 /**
278  * Return true if Octeon is in PCI Host mode. This means
279  * Linux can control the PCI bus.
280  *
281  * Returns Non zero if Octeon in host mode.
282  */
283 int octeon_is_pci_host(void)
284 {
285 #ifdef CONFIG_PCI
286 	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
287 #else
288 	return 0;
289 #endif
290 }
291 
292 /**
293  * Get the clock rate of Octeon
294  *
295  * Returns Clock rate in HZ
296  */
297 uint64_t octeon_get_clock_rate(void)
298 {
299 	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
300 
301 	return sysinfo->cpu_clock_hz;
302 }
303 EXPORT_SYMBOL(octeon_get_clock_rate);
304 
/* I/O clock rate in Hz; set once in prom_init(). */
static u64 octeon_io_clock_rate;

/* Return the I/O clock rate determined during early boot. */
u64 octeon_get_io_clock_rate(void)
{
	return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);
312 
313 
314 /**
315  * Write to the LCD display connected to the bootbus. This display
316  * exists on most Cavium evaluation boards. If it doesn't exist, then
317  * this function doesn't do anything.
318  *
319  * @s:	    String to write
320  */
321 void octeon_write_lcd(const char *s)
322 {
323 	if (octeon_bootinfo->led_display_base_addr) {
324 		void __iomem *lcd_address =
325 			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
326 					8);
327 		int i;
328 		for (i = 0; i < 8; i++, s++) {
329 			if (*s)
330 				iowrite8(*s, lcd_address + i);
331 			else
332 				iowrite8(' ', lcd_address + i);
333 		}
334 		iounmap(lcd_address);
335 	}
336 }
337 
338 /**
339  * Return the console uart passed by the bootloader
340  *
341  * Returns uart	  (0 or 1)
342  */
343 int octeon_get_boot_uart(void)
344 {
345 	int uart;
346 #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
347 	uart = 1;
348 #else
349 	uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
350 		1 : 0;
351 #endif
352 	return uart;
353 }
354 
355 /**
356  * Get the coremask Linux was booted on.
357  *
358  * Returns Core mask
359  */
360 int octeon_get_boot_coremask(void)
361 {
362 	return octeon_boot_desc_ptr->core_mask;
363 }
364 
365 /**
366  * Check the hardware BIST results for a CPU
367  */
368 void octeon_check_cpu_bist(void)
369 {
370 	const int coreid = cvmx_get_core_num();
371 	unsigned long long mask;
372 	unsigned long long bist_val;
373 
374 	/* Check BIST results for COP0 registers */
375 	mask = 0x1f00000000ull;
376 	bist_val = read_octeon_c0_icacheerr();
377 	if (bist_val & mask)
378 		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
379 		       coreid, bist_val);
380 
381 	bist_val = read_octeon_c0_dcacheerr();
382 	if (bist_val & 1)
383 		pr_err("Core%d L1 Dcache parity error: "
384 		       "CacheErr(dcache) = 0x%llx\n",
385 		       coreid, bist_val);
386 
387 	mask = 0xfc00000000000000ull;
388 	bist_val = read_c0_cvmmemctl();
389 	if (bist_val & mask)
390 		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
391 		       coreid, bist_val);
392 
393 	write_octeon_c0_dcacheerr(0);
394 }
395 
396 /**
397  * Reboot Octeon
398  *
399  * @command: Command to pass to the bootloader. Currently ignored.
400  */
401 static void octeon_restart(char *command)
402 {
403 	/* Disable all watchdogs before soft reset. They don't get cleared */
404 #ifdef CONFIG_SMP
405 	int cpu;
406 	for_each_online_cpu(cpu)
407 		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
408 #else
409 	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
410 #endif
411 
412 	mb();
413 	while (1)
414 		cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
415 }
416 
417 
418 /**
419  * Permanently stop a core.
420  *
421  * @arg: Ignored.
422  */
423 static void octeon_kill_core(void *arg)
424 {
425 	if (octeon_is_simulation())
426 		/* A break instruction causes the simulator stop a core */
427 		asm volatile ("break" ::: "memory");
428 
429 	local_irq_disable();
430 	/* Disable watchdog on this core. */
431 	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
432 	/* Spin in a low power mode. */
433 	while (true)
434 		asm volatile ("wait" ::: "memory");
435 }
436 
437 
438 /**
439  * Halt the system
440  */
441 static void octeon_halt(void)
442 {
443 	smp_call_function(octeon_kill_core, NULL, 0);
444 
445 	switch (octeon_bootinfo->board_type) {
446 	case CVMX_BOARD_TYPE_NAO38:
447 		/* Driving a 1 to GPIO 12 shuts off this board */
448 		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
449 		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
450 		break;
451 	default:
452 		octeon_write_lcd("PowerOff");
453 		break;
454 	}
455 
456 	octeon_kill_core(NULL);
457 }
458 
459 /**
460  * Return a string representing the system type
461  *
462  * Returns
463  */
464 const char *octeon_board_type_string(void)
465 {
466 	static char name[80];
467 	sprintf(name, "%s (%s)",
468 		cvmx_board_type_to_string(octeon_bootinfo->board_type),
469 		octeon_model_get_string(read_c0_prid()));
470 	return name;
471 }
472 
473 const char *get_system_type(void)
474 	__attribute__ ((alias("octeon_board_type_string")));
475 
/*
 * Configure this CPU's CvmMemCtl COP0 register (write buffer, XKPHYS
 * access, CVMSEG local memory size) and program default FAU/POW
 * hardware timeouts, then clear the icache error state.
 */
void octeon_user_io_init(void)
{
	union octeon_cvmemctl cvmmemctl;
	union cvmx_iob_fau_timeout fau_timeout;
	union cvmx_pow_nw_tim nm_tim;

	/* Get the current settings for CP0_CVMMEMCTL_REG */
	cvmmemctl.u64 = read_c0_cvmmemctl();
	/* R/W If set, marked write-buffer entries time out the same
	 * as other entries; if clear, marked write-buffer entries
	 * use the maximum timeout. */
	cvmmemctl.s.dismarkwblongto = 1;
	/* R/W If set, a merged store does not clear the write-buffer
	 * entry timeout state. */
	cvmmemctl.s.dismrgclrwbto = 0;
	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
	 * word location for an IOBDMA. The other 8 bits come from the
	 * SCRADDR field of the IOBDMA. */
	cvmmemctl.s.iobdmascrmsb = 0;
	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
	 * clear, SYNCWS and SYNCS only order unmarked
	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
	 * set. */
	cvmmemctl.s.syncwsmarked = 0;
	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
	cvmmemctl.s.dissyncws = 0;
	/* R/W If set, no stall happens on write buffer full. */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		cvmmemctl.s.diswbfst = 1;
	else
		cvmmemctl.s.diswbfst = 0;
	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with <48>==0 */
	cvmmemctl.s.xkmemenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenau = 0;

	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenau = 0;

	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
	 * when this is set) RW, reset to 0. */
	cvmmemctl.s.allsyncw = 0;

	/* R/W If set, no stores merge, and all stores reach the
	 * coherent bus in order. */
	cvmmemctl.s.nomerge = 0;
	/* R/W Selects the bit in the counter used for DID time-outs 0
	 * = 231, 1 = 230, 2 = 229, 3 = 214. Actual time-out is
	 * between 1x and 2x this interval. For example, with
	 * DIDTTO=3, expiration interval is between 16K and 32K. */
	cvmmemctl.s.didtto = 0;
	/* R/W If set, the (mem) CSR clock never turns off. */
	cvmmemctl.s.csrckalwys = 0;
	/* R/W If set, mclk never turns off. */
	cvmmemctl.s.mclkalwys = 0;
	/* R/W Selects the bit in the counter used for write buffer
	 * flush time-outs (WBFLT+11) is the bit position in an
	 * internal counter used to determine expiration. The write
	 * buffer expires between 1x and 2x this interval. For
	 * example, with WBFLT = 0, a write buffer expires between 2K
	 * and 4K cycles after the write buffer entry is allocated. */
	cvmmemctl.s.wbfltime = 0;
	/* R/W If set, do not put Istream in the L2 cache. */
	cvmmemctl.s.istrnol2 = 0;

	/*
	 * R/W The write buffer threshold. As per erratum Core-14752
	 * for CN63XX, a sc/scd might fail if the write buffer is
	 * full.  Lowering WBTHRESH greatly lowers the chances of the
	 * write buffer ever being full and triggering the erratum.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
		cvmmemctl.s.wbthresh = 4;
	else
		cvmmemctl.s.wbthresh = 10;

	/* R/W If set, CVMSEG is available for loads/stores in
	 * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
	cvmmemctl.s.cvmsegenak = 1;
#else
	cvmmemctl.s.cvmsegenak = 0;
#endif
	/* R/W If set, CVMSEG is available for loads/stores in
	 * supervisor mode. */
	cvmmemctl.s.cvmsegenas = 0;
	/* R/W If set, CVMSEG is available for loads/stores in user
	 * mode. */
	cvmmemctl.s.cvmsegenau = 0;
	/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
	 * is max legal value. */
	cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;

	write_c0_cvmmemctl(cvmmemctl.u64);

	/* Only announce the CVMSEG size once, from the boot CPU. */
	if (smp_processor_id() == 0)
		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

	/* Set a default for the hardware timeouts */
	fau_timeout.u64 = 0;
	fau_timeout.s.tout_val = 0xfff;
	/* Disable tagwait FAU timeout */
	fau_timeout.s.tout_enb = 0;
	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

	nm_tim.u64 = 0;
	/* 4096 cycles */
	nm_tim.s.nw_tim = 3;
	cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

	write_octeon_c0_icacheerr(0);
	write_c0_derraddr1(0);
}
599 
600 /**
601  * Early entry point for arch setup
602  */
603 void __init prom_init(void)
604 {
605 	struct cvmx_sysinfo *sysinfo;
606 	const char *arg;
607 	char *p;
608 	int i;
609 	int argc;
610 #ifdef CONFIG_CAVIUM_RESERVE32
611 	int64_t addr = -1;
612 #endif
613 	/*
614 	 * The bootloader passes a pointer to the boot descriptor in
615 	 * $a3, this is available as fw_arg3.
616 	 */
617 	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
618 	octeon_bootinfo =
619 		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
620 	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
621 
622 	sysinfo = cvmx_sysinfo_get();
623 	memset(sysinfo, 0, sizeof(*sysinfo));
624 	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
625 	sysinfo->phy_mem_desc_ptr =
626 		cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
627 	sysinfo->core_mask = octeon_bootinfo->core_mask;
628 	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
629 	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
630 	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
631 	sysinfo->board_type = octeon_bootinfo->board_type;
632 	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
633 	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
634 	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
635 	       sizeof(sysinfo->mac_addr_base));
636 	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
637 	memcpy(sysinfo->board_serial_number,
638 	       octeon_bootinfo->board_serial_number,
639 	       sizeof(sysinfo->board_serial_number));
640 	sysinfo->compact_flash_common_base_addr =
641 		octeon_bootinfo->compact_flash_common_base_addr;
642 	sysinfo->compact_flash_attribute_base_addr =
643 		octeon_bootinfo->compact_flash_attribute_base_addr;
644 	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
645 	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
646 	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
647 
648 	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
649 		/* I/O clock runs at a different rate than the CPU. */
650 		union cvmx_mio_rst_boot rst_boot;
651 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
652 		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
653 	} else {
654 		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
655 	}
656 
657 	/*
658 	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
659 	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
660 	 */
661 	if (!octeon_is_simulation() &&
662 	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
663 		cvmx_write_csr(CVMX_LED_EN, 0);
664 		cvmx_write_csr(CVMX_LED_PRT, 0);
665 		cvmx_write_csr(CVMX_LED_DBG, 0);
666 		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
667 		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
668 		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
669 		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
670 		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
671 		cvmx_write_csr(CVMX_LED_EN, 1);
672 	}
673 #ifdef CONFIG_CAVIUM_RESERVE32
674 	/*
675 	 * We need to temporarily allocate all memory in the reserve32
676 	 * region. This makes sure the kernel doesn't allocate this
677 	 * memory when it is getting memory from the
678 	 * bootloader. Later, after the memory allocations are
679 	 * complete, the reserve32 will be freed.
680 	 *
681 	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
682 	 * is in case we later use hugetlb entries with it.
683 	 */
684 	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
685 						0, 0, 2 << 20,
686 						"CAVIUM_RESERVE32", 0);
687 	if (addr < 0)
688 		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
689 	else
690 		octeon_reserve32_memory = addr;
691 #endif
692 
693 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
694 	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
695 		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
696 	} else {
697 		uint32_t ebase = read_c0_ebase() & 0x3ffff000;
698 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
699 		/* TLB refill */
700 		cvmx_l2c_lock_mem_region(ebase, 0x100);
701 #endif
702 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
703 		/* General exception */
704 		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
705 #endif
706 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
707 		/* Interrupt handler */
708 		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
709 #endif
710 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
711 		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
712 		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
713 #endif
714 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
715 		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
716 #endif
717 	}
718 #endif
719 
720 	octeon_check_cpu_bist();
721 
722 	octeon_uart = octeon_get_boot_uart();
723 
724 #ifdef CONFIG_SMP
725 	octeon_write_lcd("LinuxSMP");
726 #else
727 	octeon_write_lcd("Linux");
728 #endif
729 
730 #ifdef CONFIG_CAVIUM_GDB
731 	/*
732 	 * When debugging the linux kernel, force the cores to enter
733 	 * the debug exception handler to break in.
734 	 */
735 	if (octeon_get_boot_debug_flag()) {
736 		cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
737 		cvmx_read_csr(CVMX_CIU_DINT);
738 	}
739 #endif
740 
741 	octeon_setup_delays();
742 
743 	/*
744 	 * BIST should always be enabled when doing a soft reset. L2
745 	 * Cache locking for instance is not cleared unless BIST is
746 	 * enabled.  Unfortunately due to a chip errata G-200 for
747 	 * Cn38XX and CN31XX, BIST msut be disabled on these parts.
748 	 */
749 	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
750 	    OCTEON_IS_MODEL(OCTEON_CN31XX))
751 		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
752 	else
753 		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
754 
755 	/* Default to 64MB in the simulator to speed things up */
756 	if (octeon_is_simulation())
757 		MAX_MEMORY = 64ull << 20;
758 
759 	arg = strstr(arcs_cmdline, "mem=");
760 	if (arg) {
761 		MAX_MEMORY = memparse(arg + 4, &p);
762 		if (MAX_MEMORY == 0)
763 			MAX_MEMORY = 32ull << 30;
764 		if (*p == '@')
765 			RESERVE_LOW_MEM = memparse(p + 1, &p);
766 	}
767 
768 	arcs_cmdline[0] = 0;
769 	argc = octeon_boot_desc_ptr->argc;
770 	for (i = 0; i < argc; i++) {
771 		const char *arg =
772 			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
773 		if ((strncmp(arg, "MEM=", 4) == 0) ||
774 		    (strncmp(arg, "mem=", 4) == 0)) {
775 			MAX_MEMORY = memparse(arg + 4, &p);
776 			if (MAX_MEMORY == 0)
777 				MAX_MEMORY = 32ull << 30;
778 			if (*p == '@')
779 				RESERVE_LOW_MEM = memparse(p + 1, &p);
780 		} else if (strcmp(arg, "ecc_verbose") == 0) {
781 #ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
782 			__cvmx_interrupt_ecc_report_single_bit_errors = 1;
783 			pr_notice("Reporting of single bit ECC errors is "
784 				  "turned on\n");
785 #endif
786 #ifdef CONFIG_KEXEC
787 		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
788 			crashk_size = memparse(arg+12, &p);
789 			if (*p == '@')
790 				crashk_base = memparse(p+1, &p);
791 			strcat(arcs_cmdline, " ");
792 			strcat(arcs_cmdline, arg);
793 			/*
794 			 * To do: switch parsing to new style, something like:
795 			 * parse_crashkernel(arg, sysinfo->system_dram_size,
796 			 *		  &crashk_size, &crashk_base);
797 			 */
798 #endif
799 		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
800 			   sizeof(arcs_cmdline) - 1) {
801 			strcat(arcs_cmdline, " ");
802 			strcat(arcs_cmdline, arg);
803 		}
804 	}
805 
806 	if (strstr(arcs_cmdline, "console=") == NULL) {
807 #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
808 		strcat(arcs_cmdline, " console=ttyS0,115200");
809 #else
810 		if (octeon_uart == 1)
811 			strcat(arcs_cmdline, " console=ttyS1,115200");
812 		else
813 			strcat(arcs_cmdline, " console=ttyS0,115200");
814 #endif
815 	}
816 
817 	if (octeon_is_simulation()) {
818 		/*
819 		 * The simulator uses a mtdram device pre filled with
820 		 * the filesystem. Also specify the calibration delay
821 		 * to avoid calculating it every time.
822 		 */
823 		strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
824 	}
825 
826 	mips_hpt_frequency = octeon_get_clock_rate();
827 
828 	octeon_init_cvmcount();
829 
830 	_machine_restart = octeon_restart;
831 	_machine_halt = octeon_halt;
832 
833 #ifdef CONFIG_KEXEC
834 	_machine_kexec_shutdown = octeon_shutdown;
835 	_machine_crash_shutdown = octeon_crash_shutdown;
836 	_machine_kexec_prepare = octeon_kexec_prepare;
837 #endif
838 
839 	octeon_user_io_init();
840 	register_smp_ops(&octeon_smp_ops);
841 }
842 
843 /* Exclude a single page from the regions obtained in plat_mem_setup. */
844 #ifndef CONFIG_CRASH_DUMP
845 static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
846 {
847 	if (addr > *mem && addr < *mem + *size) {
848 		u64 inc = addr - *mem;
849 		add_memory_region(*mem, inc, BOOT_MEM_RAM);
850 		*mem += inc;
851 		*size -= inc;
852 	}
853 
854 	if (addr == *mem && *size > PAGE_SIZE) {
855 		*mem += PAGE_SIZE;
856 		*size -= PAGE_SIZE;
857 	}
858 }
859 #endif /* CONFIG_CRASH_DUMP */
860 
/*
 * Build the boot memory map: pull chunks from the cvmx bootmem
 * allocator (carving out the PCIe hole and any crash-kernel region),
 * add the kernel image itself, and release the reserve32 block.
 */
void __init plat_mem_setup(void)
{
	uint64_t mem_alloc_size;
	uint64_t total;
	uint64_t crashk_end;
#ifndef CONFIG_CRASH_DUMP
	int64_t memory;
	uint64_t kernel_start;
	uint64_t kernel_size;
#endif

	total = 0;
	crashk_end = 0;

	/*
	 * The Mips memory init uses the first memory location for
	 * some memory vectors. When SPARSEMEM is in use, it doesn't
	 * verify that the size is big enough for the final
	 * vectors. Making the smallest chunk 4MB seems to be enough
	 * to consistently work.
	 */
	mem_alloc_size = 4 << 20;
	if (mem_alloc_size > MAX_MEMORY)
		mem_alloc_size = MAX_MEMORY;

/* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
	add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
	total += MAX_MEMORY;
#else
#ifdef CONFIG_KEXEC
	/* Register the crash-kernel reservation as its own region. */
	if (crashk_size > 0) {
		add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
		crashk_end = crashk_base + crashk_size;
	}
#endif
	/*
	 * When allocating memory, we want incrementing addresses from
	 * bootmem_alloc so the code in add_memory_region can merge
	 * regions next to each other.
	 */
	cvmx_bootmem_lock();
	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
		&& (total < MAX_MEMORY)) {
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
						__pa_symbol(&__init_end), -1,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
		if (memory >= 0) {
			u64 size = mem_alloc_size;
#ifdef CONFIG_KEXEC
			uint64_t end;
#endif

			/*
			 * exclude a page at the beginning and end of
			 * the 256MB PCIe 'hole' so the kernel will not
			 * try to allocate multi-page buffers that
			 * span the discontinuity.
			 */
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
					    &memory, &size);
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
					    CVMX_PCIE_BAR1_PHYS_SIZE,
					    &memory, &size);
#ifdef CONFIG_KEXEC
			end = memory + mem_alloc_size;

			/*
			 * This function automatically merges address regions
			 * next to each other if they are received in
			 * incrementing order
			 */
			if (memory < crashk_base && end >  crashk_end) {
				/* Crash region sits fully inside this
				 * allocation: register both sides of it. */
				add_memory_region(memory,
						  crashk_base - memory,
						  BOOT_MEM_RAM);
				total += crashk_base - memory;
				add_memory_region(crashk_end,
						  end - crashk_end,
						  BOOT_MEM_RAM);
				total += end - crashk_end;
				continue;
			}

			if (memory >= crashk_base && end <= crashk_end)
				/*
				 * Entire memory region is within the new
				 *  kernel's memory, ignore it.
				 */
				continue;

			if (memory > crashk_base && memory < crashk_end &&
			    end > crashk_end) {
				/*
				 * The allocation starts inside the crash
				 * region: trim the overlapping front and
				 * start right after it.
				 */
				mem_alloc_size -= crashk_end - memory;
				memory = crashk_end;
			} else if (memory < crashk_base && end > crashk_base &&
				   end < crashk_end)
				/*
				 * The allocation's tail overlaps the crash
				 * region: chop off the end.
				 */
				mem_alloc_size -= end - crashk_base;
#endif
			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
			total += mem_alloc_size;
			/* Recovering mem_alloc_size */
			mem_alloc_size = 4 << 20;
		} else {
			break;
		}
	}
	cvmx_bootmem_unlock();
	/* Add the memory region for the kernel. */
	kernel_start = (unsigned long) _text;
	kernel_size = ALIGN(_end - _text, 0x100000);

	/* Adjust for physical offset. */
	kernel_start &= ~0xffffffff80000000ULL;
	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
#endif /* CONFIG_CRASH_DUMP */

#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * Now that we've allocated the kernel memory it is safe to
	 * free the reserved region. We free it here so that builtin
	 * drivers can use the memory.
	 */
	if (octeon_reserve32_memory)
		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

	if (total == 0)
		panic("Unable to allocate memory from "
		      "cvmx_bootmem_phy_alloc\n");
}
1002 
1003 /*
1004  * Emit one character to the boot UART.	 Exported for use by the
1005  * watchdog timer.
1006  */
1007 int prom_putchar(char c)
1008 {
1009 	uint64_t lsrval;
1010 
1011 	/* Spin until there is room */
1012 	do {
1013 		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
1014 	} while ((lsrval & 0x20) == 0);
1015 
1016 	/* Write the byte */
1017 	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
1018 	return 1;
1019 }
1020 EXPORT_SYMBOL(prom_putchar);
1021 
/*
 * On CN63XX pass 1, verify at runtime that the compiler applied the
 * Core-14449 workaround (CONFIG_CAVIUM_CN63XXP1); panic otherwise.
 */
void prom_free_prom_memory(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
		/* Check for presence of Core-14449 fix.  */
		u32 insn;
		u32 *foo;

		foo = &insn;

		asm volatile("# before" : : : "memory");
		prefetch(foo);
		/* Read back the instruction 12 bytes before the bal's
		 * return address, i.e. the prefetch emitted above. */
		asm volatile(
			".set push\n\t"
			".set noreorder\n\t"
			"bal 1f\n\t"
			"nop\n"
			"1:\tlw %0,-12($31)\n\t"
			".set pop\n\t"
			: "=r" (insn) : : "$31", "memory");

		/* Opcode field must be 0x33 (PREF). */
		if ((insn >> 26) != 0x33)
			panic("No PREF instruction at Core-14449 probe point.");

		/* Hint field must be 28, as produced by the WAR build
		 * options — presumably Pref_PrepareForStore; see the
		 * CONFIG_CAVIUM_CN63XXP1 workaround. */
		if (((insn >> 16) & 0x1f) != 28)
			panic("Core-14449 WAR not in place (%04x).\n"
			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn);
	}
}
1050 
1051 int octeon_prune_device_tree(void);
1052 
1053 extern const char __dtb_octeon_3xxx_begin;
1054 extern const char __dtb_octeon_3xxx_end;
1055 extern const char __dtb_octeon_68xx_begin;
1056 extern const char __dtb_octeon_68xx_end;
/*
 * Select and install the flattened device tree: prefer a tree passed
 * by a new-enough bootloader; otherwise fall back to the built-in
 * CN68XX or generic 3xxx tree (which must then be pruned).
 */
void __init device_tree_init(void)
{
	int dt_size;
	struct boot_param_header *fdt;
	bool do_prune;

	/* Bootloader-supplied tree (bootinfo minor version >= 3). */
	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
		if (fdt_check_header(fdt))
			panic("Corrupt Device Tree passed to kernel.");
		dt_size = be32_to_cpu(fdt->totalsize);
		do_prune = false;
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin;
		dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin;
		do_prune = true;
	} else {
		fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin;
		dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin;
		do_prune = true;
	}

	/* Copy the default tree from init memory. */
	initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
	if (initial_boot_params == NULL)
		panic("Could not allocate initial_boot_params\n");
	memcpy(initial_boot_params, fdt, dt_size);

	/* Built-in trees describe every variant; strip nodes that do
	 * not apply to this board before unflattening. */
	if (do_prune) {
		octeon_prune_device_tree();
		pr_info("Using internal Device Tree.\n");
	} else {
		pr_info("Using passed Device Tree.\n");
	}
	unflatten_device_tree();
}
1093 
/* Set by the "disable_octeon_edac" early param; checked in edac_devinit(). */
static int __initdata disable_octeon_edac_p;

/* Early-param handler: suppress registration of the EDAC devices. */
static int __init disable_octeon_edac(char *str)
{
	disable_octeon_edac_p = 1;
	return 0;
}
early_param("disable_octeon_edac", disable_octeon_edac);
1102 
1103 static char *edac_device_names[] = {
1104 	"octeon_l2c_edac",
1105 	"octeon_pc_edac",
1106 };
1107 
1108 static int __init edac_devinit(void)
1109 {
1110 	struct platform_device *dev;
1111 	int i, err = 0;
1112 	int num_lmc;
1113 	char *name;
1114 
1115 	if (disable_octeon_edac_p)
1116 		return 0;
1117 
1118 	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
1119 		name = edac_device_names[i];
1120 		dev = platform_device_register_simple(name, -1, NULL, 0);
1121 		if (IS_ERR(dev)) {
1122 			pr_err("Registation of %s failed!\n", name);
1123 			err = PTR_ERR(dev);
1124 		}
1125 	}
1126 
1127 	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
1128 		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
1129 	for (i = 0; i < num_lmc; i++) {
1130 		dev = platform_device_register_simple("octeon_lmc_edac",
1131 						      i, NULL, 0);
1132 		if (IS_ERR(dev)) {
1133 			pr_err("Registation of octeon_lmc_edac %d failed!\n", i);
1134 			err = PTR_ERR(dev);
1135 		}
1136 	}
1137 
1138 	return err;
1139 }
1140 device_initcall(edac_devinit);
1141