1 /*
2  * Copyright 2007-2011 Freescale Semiconductor, Inc.
3  *
4  * (C) Copyright 2003 Motorola Inc.
5  * Modified by Xianghua Xiao, X.Xiao@motorola.com
6  *
7  * (C) Copyright 2000
8  * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
9  *
10  * See file CREDITS for list of people who contributed to this
11  * project.
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License as
15  * published by the Free Software Foundation; either version 2 of
16  * the License, or (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
26  * MA 02111-1307 USA
27  */
28 
29 #include <common.h>
30 #include <watchdog.h>
31 #include <asm/processor.h>
32 #include <ioports.h>
33 #include <sata.h>
34 #include <fm_eth.h>
35 #include <asm/io.h>
36 #include <asm/cache.h>
37 #include <asm/mmu.h>
38 #include <asm/fsl_law.h>
39 #include <asm/fsl_serdes.h>
40 #include <asm/fsl_srio.h>
41 #include <linux/compiler.h>
42 #include "mp.h"
43 #ifdef CONFIG_SYS_QE_FMAN_FW_IN_NAND
44 #include <nand.h>
45 #include <errno.h>
46 #endif
47 
48 #include "../../../../drivers/block/fsl_sata.h"
49 
50 DECLARE_GLOBAL_DATA_PTR;
51 
52 #ifdef CONFIG_QE
53 extern qe_iop_conf_t qe_iop_conf_tab[];
54 extern void qe_config_iopin(u8 port, u8 pin, int dir,
55 				int open_drain, int assign);
56 extern void qe_init(uint qe_base);
57 extern void qe_reset(void);
58 
59 static void config_qe_ioports(void)
60 {
61 	u8      port, pin;
62 	int     dir, open_drain, assign;
63 	int     i;
64 
65 	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
66 		port		= qe_iop_conf_tab[i].port;
67 		pin		= qe_iop_conf_tab[i].pin;
68 		dir		= qe_iop_conf_tab[i].dir;
69 		open_drain	= qe_iop_conf_tab[i].open_drain;
70 		assign		= qe_iop_conf_tab[i].assign;
71 		qe_config_iopin(port, pin, dir, open_drain, assign);
72 	}
73 }
74 #endif
75 
76 #ifdef CONFIG_CPM2
/*
 * Configure the four CPM2 parallel I/O ports of the MPC8560 from the
 * board-supplied iop_conf_tab[][] table.
 *
 * For each port the per-pin settings are first accumulated into bit
 * masks (pmsk = pins to touch, plus one mask per register), then the
 * port registers are updated in a single pass.
 */
void config_8560_ioports (volatile ccsr_cpm_t * cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		uint pmsk = 0,
		     ppar = 0,
		     psor = 0,
		     pdir = 0,
		     podr = 0,
		     pdat = 0;
		iop_conf_t *iopc = (iop_conf_t *) & iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			/* only pins with .conf set are (re)programmed */
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr (cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * the (somewhat confused) paragraph at the
			 * bottom of page 35-5 warns that there might
			 * be "unknown behaviour" when programming
			 * PSORx and PDIRx, if PPARx = 1, so I
			 * decided this meant I had to disable the
			 * dedicated function first, and enable it
			 * last.
			 */
			/* write order is deliberate: PPAR cleared first,
			 * re-enabled last — do not reorder */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			iop->ppar |= ppar;
		}
	}
}
138 #endif
139 
140 #ifdef CONFIG_SYS_FSL_CPC
/*
 * Enable each CoreNet Platform Cache (CPC) controller, applying the
 * documented errata workarounds, and print the total enabled size.
 * When booting from PBL-initialized SRAM, the CPC is first taken out
 * of SRAM mode and its covering LAW is removed.
 */
static void enable_cpc(void)
{
	int i;
	u32 size = 0;	/* running total in KB, reported at the end */

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		u32 cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);
#ifdef CONFIG_RAMBOOT_PBL
		/* CPC was used as init-RAM (SRAM) by the PBL; undo that */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
			/* find and disable LAW of SRAM */
			struct law_entry law = find_law(CONFIG_SYS_INIT_L3_ADDR);

			if (law.index == -1) {
				printf("\nFatal error happened\n");
				return;
			}
			disable_law(law.index);

			clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
			out_be32(&cpc->cpccsr0, 0);
			out_be32(&cpc->cpcsrcr0, 0);
		}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		/* erratum CPC-A002: disable tag ECC scrubbing */
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		/* erratum CPC-A003: disable data ECC scrubbing */
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif

		/* enable the cache with parity checking */
		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync write */
		in_be32(&cpc->cpccsr0);

	}

	printf("Corenet Platform Cache: %d KB enabled\n", size);
}
183 
/*
 * Flash-invalidate every CPC and clear its line locks, then spin until
 * the hardware clears the FI/LFC bits to signal completion.  A CPC
 * currently configured entirely as SRAM must not be invalidated and is
 * skipped.
 */
void invalidate_cpc(void)
{
	int i;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		/* skip CPC when it used as all SRAM */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
			continue;
		/* Flash invalidate the CPC and clear all the locks */
		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
		/* hardware clears FI/LFC when the operation completes */
		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
			;
	}
}
199 #else
200 #define enable_cpc()
201 #define invalidate_cpc()
202 #endif /* CONFIG_SYS_FSL_CPC */
203 
204 /*
205  * Breathe some life into the CPU...
206  *
207  * Set up the memory map
208  * initialize a bunch of registers
209  */
210 
211 #ifdef CONFIG_FSL_CORENET
/*
 * Enable the timebase for the calling core on CoreNet-based parts.
 * The core's own id is read from the PIC WHOAMI register and used to
 * set the matching bit in the RCPM core-timebase-enable register.
 */
static void corenet_tb_init(void)
{
	volatile ccsr_rcpm_t *rcpm =
		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
	volatile ccsr_pic_t *pic =
		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
	u32 whoami = in_be32(&pic->whoami);

	/* Enable the timebase register for this core */
	out_be32(&rcpm->ctbenrl, (1 << whoami));
}
223 #endif
224 
/*
 * Early (pre-relocation) CPU initialization: errata workarounds, TLB
 * and LAW cleanup, early memory-controller and I/O-port setup, and CPC
 * invalidation before DDR is brought up.  Runs from flash with no
 * writable BSS, so only stack locals and registers may be written.
 */
void cpu_init_f (void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_SYS_DCSRBAR_PHYS
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif
#if defined(CONFIG_SECURE_BOOT)
	struct law_entry law;
#endif
#ifdef CONFIG_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: A core hang possible while executing
	 * a msync instruction and a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	/* free the TLB entries used by the boot page / early mappings */
	disable_tlb(14);
	disable_tlb(15);

#if defined(CONFIG_SECURE_BOOT)
	/* Disable the LAW created for NOR flash by the PBI commands */
	law = find_law(CONFIG_SYS_PBI_FLASH_BASE);
	if (law.index != -1)
		disable_law(law.index);
#endif

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

       init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif
#ifdef CONFIG_QE
	/* Config QE ioports */
	config_qe_ioports();
#endif
#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();

 #ifdef CONFIG_SYS_DCSRBAR_PHYS
	/* set DCSRCR so that DCSR space is 1G */
	setbits_be32(&gur->dcsrcr, FSL_CORENET_DCSR_SZ_1G);
	/* read back to make sure the write has taken effect */
	in_be32(&gur->dcsrcr);
#endif

}
289 
/*
 * Default fsl_serdes_init() for platforms without SERDES: a no-op.
 * Boards/SoCs with SERDES provide a strong definition that overrides
 * this weak alias.
 */
static void __fsl_serdes__init(void)
{
	/* intentionally empty */
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);
296 
297 /*
298  * Initialize L2 as cache.
299  *
300  * The newer 8548, etc, parts have twice as much cache, but
301  * use the same bit-encoding as the older 8555, etc, parts.
302  *
303  */
int cpu_init_r(void)
{
	__maybe_unused u32 svr = get_svr();
#ifdef CONFIG_SYS_LBC_LCRR
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
#endif

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
	defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
	/*
	 * CPU22 applies to P4080 rev 1.0, 2.0, fixed in 3.0
	 * NMG_CPU_A011 applies to P4080 rev 1.0, 2.0, fixed in 3.0
	 * also applies to P3041 rev 1.0, 1.1, P2041 rev 1.0, 1.1
	 */
	if (SVR_SOC_VER(svr) != SVR_P4080 || SVR_MAJ(svr) < 3) {
		/* workaround: force the L1 data cache into write-shadow mode */
		flush_dcache();
		mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
		sync();
	}
#endif

	puts ("L2:    ");

#if defined(CONFIG_L2_CACHE)
	/* Platform (frontside) L2 cache, e.g. 8540/8548 family */
	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
	volatile uint cache_ctl;
	uint ver;
	u32 l2siz_field;

	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	/*
	 * If the early boot code used part of the L2 as SRAM, take it
	 * back out of SRAM mode before reconfiguring it as cache.
	 */
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	/* L2CTL[28:29] encodes the cache size; decode and report it */
	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;	/* NOTE(review): unreachable after return */
	case 0x1:
		/* older parts (8540/8560/8541/8555) have half-size L2 */
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8555) {
			puts("128 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 Kbyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8555) {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts ("512 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		/* cache already on (e.g. still holding SRAM) — don't re-init */
		puts("already enabled");
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		u32 l2srbar = l2cache->l2srbar0;
		/* relocate L2 SRAM out of the flash window if it overlaps */
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	/* Backside (per-core) L2, controlled via SPRs (e500mc parts) */
	if (SVR_SOC_VER(svr) == SVR_P2040) {
		/* P2040 has no backside L2 */
		puts("N/A\n");
		goto skip_l2;
	}

	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	/* hardware clears the bits when invalidation completes */
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		/* wait for the enable bit to stick before reporting */
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		/* L2CFG0[L2CSIZE] is in units of 64 KB */
		printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
	}

skip_l2:
#else
	puts("disabled\n");
#endif

	enable_cpc();

	/* needs to be in ram since code uses global static vars */
	fsl_serdes_init();

#ifdef CONFIG_SYS_SRIO
	srio_init();
#ifdef CONFIG_SRIOBOOT_MASTER
	srio_boot_master();
#ifdef CONFIG_SRIOBOOT_SLAVE_HOLDOFF
	srio_boot_master_release_slave();
#endif
#endif
#endif

#if defined(CONFIG_MP)
	/* prepare secondary cores for SMP boot */
	setup_mp();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC136
	{
		/*
		 * NOTE(review): 0x20520 appears to be an undocumented DCSR
		 * register used by this erratum workaround — confirm against
		 * the erratum sheet before changing.
		 */
		void *p;
		p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
		setbits_be32(p, 1 << (31 - 14));
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of LCRR register to improve the writing
	 * speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);
	isync();
#ifdef CONFIG_SYS_FSL_ERRATUM_NMG_LBC103
	udelay(100);
#endif
#endif

#ifdef CONFIG_SYS_FSL_USB1_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy1 =
			(void *)CONFIG_SYS_MPC85xx_USB1_PHY_ADDR;
		out_be32(&usb_phy1->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif
#ifdef CONFIG_SYS_FSL_USB2_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy2 =
			(void *)CONFIG_SYS_MPC85xx_USB2_PHY_ADDR;
		out_be32(&usb_phy2->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif

#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif

#if defined(CONFIG_FSL_SATA_V2) && defined(CONFIG_FSL_SATA_ERRATUM_A001)
	/*
	 * For P1022/1013 Rev1.0 silicon, after power on SATA host
	 * controller is configured in legacy mode instead of the
	 * expected enterprise mode. Software needs to clear bit[28]
	 * of HControl register to change to enterprise mode from
	 * legacy mode.  We assume that the controller is offline.
	 */
	if (IS_SVR_REV(svr, 1, 0) &&
	    ((SVR_SOC_VER(svr) == SVR_P1022) ||
	     (SVR_SOC_VER(svr) == SVR_P1013))) {
		fsl_sata_reg_t *reg;

		/* first SATA controller */
		reg = (void *)CONFIG_SYS_MPC85xx_SATA1_ADDR;
		clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);

		/* second SATA controller */
		reg = (void *)CONFIG_SYS_MPC85xx_SATA2_ADDR;
		clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);
	}
#endif


	return 0;
}
530 
531 extern void setup_ivors(void);
532 
533 void arch_preboot_os(void)
534 {
535 	u32 msr;
536 
537 	/*
538 	 * We are changing interrupt offsets and are about to boot the OS so
539 	 * we need to make sure we disable all async interrupts. EE is already
540 	 * disabled by the time we get called.
541 	 */
542 	msr = mfmsr();
543 	msr &= ~(MSR_ME|MSR_CE);
544 	mtmsr(msr);
545 
546 	setup_ivors();
547 }
548 
#if defined(CONFIG_CMD_SATA) && defined(CONFIG_FSL_SATA)
/*
 * Bring up SATA only when at least one of the two ports actually has
 * SERDES lanes configured; otherwise skip initialization and return 1.
 */
int sata_initialize(void)
{
	if (!is_serdes_configured(SATA1) && !is_serdes_configured(SATA2))
		return 1;

	return __sata_initialize();
}
#endif
558 
/*
 * Late init run on the boot core for the secondary subsystems: load the
 * QE firmware from NAND (when configured) and initialize/reset the QE.
 */
void cpu_secondary_init_r(void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#ifdef CONFIG_SYS_QE_FMAN_FW_IN_NAND
	int ret;
	size_t fw_length = CONFIG_SYS_QE_FMAN_FW_LENGTH;

	/* load QE firmware from NAND flash to DDR first */
	ret = nand_read(&nand_info[0], (loff_t)CONFIG_SYS_QE_FMAN_FW_IN_NAND,
			&fw_length, (u_char *)CONFIG_SYS_QE_FMAN_FW_ADDR);

	/*
	 * -EUCLEAN only means bitflips were detected and corrected, so
	 * the data read is still valid; any other non-zero return is a
	 * genuine read failure and must be reported.  (The previous
	 * check used ret == -EUCLEAN, which warned on successful
	 * corrected reads and silently ignored real errors.)
	 */
	if (ret && ret != -EUCLEAN) {
		printf ("NAND read for QE firmware at offset %x failed %d\n",
				CONFIG_SYS_QE_FMAN_FW_IN_NAND, ret);
	}
#endif
	qe_init(qe_base);
	qe_reset();
#endif
}
580