/*
 * Copyright 2007-2011 Freescale Semiconductor, Inc.
 *
 * (C) Copyright 2003 Motorola Inc.
 * Modified by Xianghua Xiao, X.Xiao@motorola.com
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <watchdog.h>
#include <asm/processor.h>
#include <ioports.h>
#include <sata.h>
#include <fm_eth.h>
#include <asm/io.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/fsl_law.h>
#include <asm/fsl_serdes.h>
#include <asm/fsl_srio.h>
#include <linux/compiler.h>
#include "mp.h"
#ifdef CONFIG_SYS_QE_FMAN_FW_IN_NAND
#include <nand.h>
#include <errno.h>
#endif

#include "../../../../drivers/block/fsl_sata.h"

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_QE
extern qe_iop_conf_t qe_iop_conf_tab[];
extern void qe_config_iopin(u8 port, u8 pin, int dir,
				int open_drain, int assign);
extern void qe_init(uint qe_base);
extern void qe_reset(void);

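/*
 * Walk the board's QE pin-mux table and program each pin until the
 * QE_IOP_TAB_END sentinel entry is reached.
 */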
static void config_qe_ioports(void)
{
	u8      port, pin;
	int     dir, open_drain, assign;
	int     i;

	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
		port		= qe_iop_conf_tab[i].port;
		pin		= qe_iop_conf_tab[i].pin;
		dir		= qe_iop_conf_tab[i].dir;
		open_drain	= qe_iop_conf_tab[i].open_drain;
		assign		= qe_iop_conf_tab[i].assign;
		qe_config_iopin(port, pin, dir, open_drain, assign);
	}
}
#endif

#ifdef CONFIG_CPM2
void config_8560_ioports(volatile ccsr_cpm_t *cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		uint pmsk = 0,
		     ppar = 0,
		     psor = 0,
		     pdir = 0,
		     podr = 0,
		     pdat = 0;
		iop_conf_t *iopc = (iop_conf_t *)&iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr(cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * The (somewhat confusing) paragraph at the bottom of
			 * page 35-5 warns of "unknown behaviour" when
			 * programming PSORx and PDIRx while PPARx = 1, so
			 * disable the dedicated function first and re-enable
			 * it last.
			 */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			iop->ppar |= ppar;
		}
	}
}
#endif

#ifdef CONFIG_SYS_FSL_CPC
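/*
 * Enable each CoreNet Platform Cache (CPC) as cache: if the PBL set a CPC
 * up as SRAM for RAM-booting, tear down its LAW and SRAM configuration
 * first, apply any errata workarounds, then enable the cache and report
 * the total size.
 */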
static void enable_cpc(void)
{
	int i;
	u32 size = 0;

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		u32 cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);
#ifdef CONFIG_RAMBOOT_PBL
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
			/* find and disable the LAW covering the CPC SRAM */
			struct law_entry law = find_law(CONFIG_SYS_INIT_L3_ADDR);

			if (law.index == -1) {
				printf("\nFatal error: no LAW found for CPC SRAM\n");
				return;
			}
			disable_law(law.index);

			clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
			out_be32(&cpc->cpccsr0, 0);
			out_be32(&cpc->cpcsrcr0, 0);
		}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif

		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync the write */
		in_be32(&cpc->cpccsr0);
	}

	printf("Corenet Platform Cache: %d KB enabled\n", size);
}

void invalidate_cpc(void)
{
	int i;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		/* skip the CPC when it is being used entirely as SRAM */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
			continue;
		/* Flash-invalidate the CPC and clear all the locks */
		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
			;
	}
}
#else
#define enable_cpc()
#define invalidate_cpc()
#endif /* CONFIG_SYS_FSL_CPC */

/*
 * Breathe some life into the CPU...
 *
 * Set up the memory map
 * initialize a bunch of registers
 */

#ifdef CONFIG_FSL_CORENET
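/*
 * Enable this core's timebase.  The PIC WHOAMI register identifies the
 * core we are running on, and the matching bit in the RCPM core timebase
 * enable register (CTBENRL) gates that core's timebase.
 */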
static void corenet_tb_init(void)
{
	volatile ccsr_rcpm_t *rcpm =
		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
	volatile ccsr_pic_t *pic =
		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
	u32 whoami = in_be32(&pic->whoami);

	/* Enable the timebase register for this core */
	out_be32(&rcpm->ctbenrl, (1 << whoami));
}
#endif

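/*
 * Early (pre-relocation) CPU setup: apply early errata workarounds, drop
 * the boot-time TLB entries and LAWs that are no longer needed, and
 * configure the local bus controller, CPM/QE I/O ports, DMA, timebase and
 * DCSR mapping before DDR is brought up.
 */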
void cpu_init_f (void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_SYS_DCSRBAR_PHYS
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif
#if defined(CONFIG_SECURE_BOOT)
	struct law_entry law;
#endif
#ifdef CONFIG_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: a core hang is possible while executing
	 * an msync instruction when a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2) && (SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	disable_tlb(14);
	disable_tlb(15);

#if defined(CONFIG_SECURE_BOOT)
	/* Disable the LAW created for NOR flash by the PBI commands */
	law = find_law(CONFIG_SYS_PBI_FLASH_BASE);
	if (law.index != -1)
		disable_law(law.index);
#endif

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

	init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif
#ifdef CONFIG_QE
	/* Configure QE I/O ports */
	config_qe_ioports();
#endif
#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();

#ifdef CONFIG_SYS_DCSRBAR_PHYS
	/* Set DCSRCR so that the DCSR space is 1G */
	setbits_be32(&gur->dcsrcr, FSL_CORENET_DCSR_SZ_1G);
	in_be32(&gur->dcsrcr);
#endif
}

/* Implement a dummy function for those platforms w/o SERDES */
static void __fsl_serdes__init(void)
{
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);

/*
 * Initialize L2 as cache.
 *
 * The newer 8548, etc, parts have twice as much cache, but
 * use the same bit-encoding as the older 8555, etc, parts.
 */
int cpu_init_r(void)
{
	__maybe_unused u32 svr = get_svr();
#ifdef CONFIG_SYS_LBC_LCRR
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
#endif

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22)
	flush_dcache();
	mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
	sync();
#endif

	puts("L2:    ");

#if defined(CONFIG_L2_CACHE)
	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
	volatile uint cache_ctl;
	uint ver;
	u32 l2siz_field;

	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

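	/*
	 * The size field read back from L2CTL encodes how much L2 array is
	 * present; decode it and choose the matching control value to write
	 * back when enabling the cache.
	 */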
	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
	case 0x1:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("128 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 Kbyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts("512 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		puts("already enabled");
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		u32 l2srbar = l2cache->l2srbar0;
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	if ((SVR_SOC_VER(svr) == SVR_P2040) ||
	    (SVR_SOC_VER(svr) == SVR_P2040_E)) {
		puts("N/A\n");
		goto skip_l2;
	}

	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
	}

skip_l2:
#else
	puts("disabled\n");
#endif

	enable_cpc();

	/* Needs to run from RAM since the code uses global/static variables */
	fsl_serdes_init();

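	/*
	 * Bring up Serial RapidIO if configured; when this board is the SRIO
	 * boot master, optionally release the held-off boot slave.
	 */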
#ifdef CONFIG_SYS_SRIO
	srio_init();
#ifdef CONFIG_SRIOBOOT_MASTER
	srio_boot_master();
#ifdef CONFIG_SRIOBOOT_SLAVE_HOLDOFF
	srio_boot_master_release_slave();
#endif
#endif
#endif

#if defined(CONFIG_MP)
	setup_mp();
#endif

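	/*
	 * Workaround for the eSDHC erratum covered by
	 * CONFIG_SYS_FSL_ERRATUM_ESDHC136: set bit 14 (big-endian bit
	 * numbering) of the DCSR-space register at offset 0x20520.
	 */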
#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC136
	{
		void *p;
		p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
		setbits_be32(p, 1 << (31 - 14));
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of the LCRR register to improve the write
	 * speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);
	isync();
#ifdef CONFIG_SYS_FSL_ERRATUM_NMG_LBC103
	udelay(100);
#endif
#endif

#ifdef CONFIG_SYS_FSL_USB1_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy1 =
			(void *)CONFIG_SYS_MPC85xx_USB1_PHY_ADDR;
		out_be32(&usb_phy1->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif
#ifdef CONFIG_SYS_FSL_USB2_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy2 =
			(void *)CONFIG_SYS_MPC85xx_USB2_PHY_ADDR;
		out_be32(&usb_phy2->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif

#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif

#if defined(CONFIG_FSL_SATA_V2) && defined(CONFIG_FSL_SATA_ERRATUM_A001)
	/*
	 * On P1022/P1013 Rev 1.0 silicon, after power-on the SATA host
	 * controller is configured in legacy mode instead of the expected
	 * enterprise mode. Software needs to clear bit[28] of the HControl
	 * register to change from legacy to enterprise mode. We assume
	 * that the controller is offline.
	 */
	if (IS_SVR_REV(svr, 1, 0) &&
	    ((SVR_SOC_VER(svr) == SVR_P1022) ||
	     (SVR_SOC_VER(svr) == SVR_P1022_E) ||
	     (SVR_SOC_VER(svr) == SVR_P1013) ||
	     (SVR_SOC_VER(svr) == SVR_P1013_E))) {
		fsl_sata_reg_t *reg;

		/* first SATA controller */
		reg = (void *)CONFIG_SYS_MPC85xx_SATA1_ADDR;
		clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);

		/* second SATA controller */
		reg = (void *)CONFIG_SYS_MPC85xx_SATA2_ADDR;
		clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);
	}
#endif

	return 0;
}

extern void setup_ivors(void);

void arch_preboot_os(void)
{
	u32 msr;

	/*
	 * We are changing interrupt offsets and are about to boot the OS so
	 * we need to make sure we disable all async interrupts. EE is already
	 * disabled by the time we get called.
	 */
	msr = mfmsr();
	msr &= ~(MSR_ME|MSR_CE|MSR_DE);
	mtmsr(msr);

	setup_ivors();
}

#if defined(CONFIG_CMD_SATA) && defined(CONFIG_FSL_SATA)
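/*
 * Only probe SATA when at least one SATA port is actually routed through
 * the SerDes; otherwise skip initialization.
 */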
int sata_initialize(void)
{
	if (is_serdes_configured(SATA1) || is_serdes_configured(SATA2))
		return __sata_initialize();

	return 1;
}
#endif

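/*
 * Second-stage (post-relocation) init: when the QUICC Engine is present,
 * optionally load its firmware from NAND into DDR, then initialize and
 * reset the QE.
 */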
void cpu_secondary_init_r(void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#ifdef CONFIG_SYS_QE_FMAN_FW_IN_NAND
	int ret;
	size_t fw_length = CONFIG_SYS_QE_FMAN_FW_LENGTH;

	/* load the QE firmware from NAND flash into DDR first */
	ret = nand_read(&nand_info[0], (loff_t)CONFIG_SYS_QE_FMAN_FW_IN_NAND,
			&fw_length, (u_char *)CONFIG_SYS_QE_FMAN_FW_ADDR);

	/* -EUCLEAN only indicates corrected bitflips, so it is not fatal */
	if (ret && ret != -EUCLEAN) {
		printf("NAND read for QE firmware at offset %x failed %d\n",
				CONFIG_SYS_QE_FMAN_FW_IN_NAND, ret);
	}
#endif
	qe_init(qe_base);
	qe_reset();
#endif
}