1 /*
2  * Copyright 2007-2011 Freescale Semiconductor, Inc.
3  *
4  * (C) Copyright 2003 Motorola Inc.
5  * Modified by Xianghua Xiao, X.Xiao@motorola.com
6  *
7  * (C) Copyright 2000
8  * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
9  *
10  * See file CREDITS for list of people who contributed to this
11  * project.
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License as
15  * published by the Free Software Foundation; either version 2 of
16  * the License, or (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
26  * MA 02111-1307 USA
27  */
28 
29 #include <common.h>
30 #include <watchdog.h>
31 #include <asm/processor.h>
32 #include <ioports.h>
33 #include <sata.h>
34 #include <asm/io.h>
35 #include <asm/cache.h>
36 #include <asm/mmu.h>
37 #include <asm/fsl_law.h>
38 #include <asm/fsl_serdes.h>
39 #include "mp.h"
40 #ifdef CONFIG_SYS_QE_FW_IN_NAND
41 #include <nand.h>
42 #include <errno.h>
43 #endif
44 
45 DECLARE_GLOBAL_DATA_PTR;
46 
47 extern void srio_init(void);
48 
49 #ifdef CONFIG_QE
50 extern qe_iop_conf_t qe_iop_conf_tab[];
51 extern void qe_config_iopin(u8 port, u8 pin, int dir,
52 				int open_drain, int assign);
53 extern void qe_init(uint qe_base);
54 extern void qe_reset(void);
55 
56 static void config_qe_ioports(void)
57 {
58 	u8      port, pin;
59 	int     dir, open_drain, assign;
60 	int     i;
61 
62 	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
63 		port		= qe_iop_conf_tab[i].port;
64 		pin		= qe_iop_conf_tab[i].pin;
65 		dir		= qe_iop_conf_tab[i].dir;
66 		open_drain	= qe_iop_conf_tab[i].open_drain;
67 		assign		= qe_iop_conf_tab[i].assign;
68 		qe_config_iopin(port, pin, dir, open_drain, assign);
69 	}
70 }
71 #endif
72 
73 #ifdef CONFIG_CPM2
/*
 * Configure the four CPM parallel I/O ports (A-D) of the MPC8560 from
 * the board-supplied iop_conf_tab[][].  For each port, a bitmask of all
 * configured pins is accumulated first, then the port registers are
 * updated in a single pass so unconfigured pins are left untouched.
 */
void config_8560_ioports (volatile ccsr_cpm_t * cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		/* Accumulated register images for this port */
		uint pmsk = 0,		/* mask of pins we configure */
		     ppar = 0,		/* pin assignment (dedicated fn) */
		     psor = 0,		/* special option */
		     pdir = 0,		/* direction */
		     podr = 0,		/* open drain */
		     pdat = 0;		/* initial data value */
		iop_conf_t *iopc = (iop_conf_t *) & iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		/* Only touch the hardware if at least one pin is configured */
		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr (cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * the (somewhat confused) paragraph at the
			 * bottom of page 35-5 warns that there might
			 * be "unknown behaviour" when programming
			 * PSORx and PDIRx, if PPARx = 1, so I
			 * decided this meant I had to disable the
			 * dedicated function first, and enable it
			 * last.
			 */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			/* Re-enable the dedicated function last (see above) */
			iop->ppar |= ppar;
		}
	}
}
135 #endif
136 
137 #ifdef CONFIG_SYS_FSL_CPC
/*
 * Enable all CoreNet Platform Caches (CPC) and report the total size.
 * Applies errata workarounds and, when booting via the pre-boot loader,
 * reclaims any CPC that was configured as init-RAM back into cache mode.
 */
static void enable_cpc(void)
{
	int i;
	u32 size = 0;	/* running total, in KB, across all CPCs */

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		u32 cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);
#ifdef CONFIG_RAMBOOT_PBL
		/*
		 * If this CPC is still in SRAM mode (used as init-RAM by the
		 * pre-boot loader), tear that down before enabling it as
		 * cache: drop the covering LAW, then clear CSR0 and SRCR0.
		 */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
			/* find and disable LAW of SRAM */
			struct law_entry law = find_law(CONFIG_SYS_INIT_L3_ADDR);

			if (law.index == -1) {
				printf("\nFatal error happened\n");
				return;
			}
			disable_law(law.index);

			clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
			out_be32(&cpc->cpccsr0, 0);
			out_be32(&cpc->cpcsrcr0, 0);
		}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		/* Erratum CPC-A002: disable tag ECC scrubbing */
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		/* Erratum CPC-A003: disable data ECC scrubbing */
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif

		/* Enable the cache with parity checking */
		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync write */
		in_be32(&cpc->cpccsr0);

	}

	printf("Corenet Platform Cache: %d KB enabled\n", size);
}
180 
181 void invalidate_cpc(void)
182 {
183 	int i;
184 	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;
185 
186 	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
187 		/* skip CPC when it used as all SRAM */
188 		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
189 			continue;
190 		/* Flash invalidate the CPC and clear all the locks */
191 		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
192 		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
193 			;
194 	}
195 }
196 #else
197 #define enable_cpc()
198 #define invalidate_cpc()
199 #endif /* CONFIG_SYS_FSL_CPC */
200 
201 /*
202  * Breathe some life into the CPU...
203  *
204  * Set up the memory map
205  * initialize a bunch of registers
206  */
207 
208 #ifdef CONFIG_FSL_CORENET
209 static void corenet_tb_init(void)
210 {
211 	volatile ccsr_rcpm_t *rcpm =
212 		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
213 	volatile ccsr_pic_t *pic =
214 		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
215 	u32 whoami = in_be32(&pic->whoami);
216 
217 	/* Enable the timebase register for this core */
218 	out_be32(&rcpm->ctbenrl, (1 << whoami));
219 }
220 #endif
221 
/*
 * Early (pre-relocation) CPU setup: apply silicon errata workarounds,
 * free the boot TLB entries, configure I/O ports, reset the CPM/QE,
 * set up the TLB CAM entries and invalidate the CPC before DDR is
 * enabled.
 *
 * Fixes vs. original: line with init_early_memctl_regs() was indented
 * with spaces instead of a tab, and one #ifdef carried a stray leading
 * space; both normalized to the file's tab convention.  Call order is
 * unchanged.
 */
void cpu_init_f (void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_SYS_DCSRBAR_PHYS
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

#ifdef CONFIG_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: A core hang possible while executing
	 * a msync instruction and a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	/* Free up the TLB entries used by the boot code */
	disable_tlb(14);
	disable_tlb(15);

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

	init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif
#ifdef CONFIG_QE
	/* Config QE ioports */
	config_qe_ioports();
#endif
#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();

#ifdef CONFIG_SYS_DCSRBAR_PHYS
	/* set DCSRCR so that DCSR space is 1G */
	setbits_be32(&gur->dcsrcr, FSL_CORENET_DCSR_SZ_1G);
	/* read back to sync the write before continuing */
	in_be32(&gur->dcsrcr);
#endif

}
277 
/*
 * Default no-op SERDES init.  fsl_serdes_init() is a weak alias to this
 * empty body, so platforms without SERDES get a harmless stub while
 * platforms with SERDES can provide a strong definition that overrides
 * it.  (The alias string must match the function's symbol name exactly.)
 */
static void __fsl_serdes__init(void)
{
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);
284 
285 /*
286  * Initialize L2 as cache.
287  *
288  * The newer 8548, etc, parts have twice as much cache, but
289  * use the same bit-encoding as the older 8555, etc, parts.
290  *
291  */
/*
 * Post-relocation CPU setup: configure and enable the L2 cache (either
 * the memory-mapped L2 controller or the backside L2, depending on the
 * part), enable the CPC, initialize SERDES/SRIO/MP, and apply various
 * config-gated errata workarounds.  Returns 0 on success, -1 if the L2
 * size field cannot be decoded.
 */
int cpu_init_r(void)
{
#ifdef CONFIG_SYS_LBC_LCRR
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
#endif

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22)
	/* Erratum CPU22 workaround: enable L1 data cache write shadow */
	flush_dcache();
	mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
	sync();
#endif

	puts ("L2:    ");

#if defined(CONFIG_L2_CACHE)
	/* Memory-mapped L2 cache controller (e500v1/v2 style parts) */
	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
	volatile uint cache_ctl;
	uint svr, ver;
	uint l2srbar;
	u32 l2siz_field;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	/*
	 * When booting from RAM the L2 may still be set up as init SRAM;
	 * undo that configuration before (re)enabling it as cache.
	 */
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	/* Bits 28-29 of L2CTL encode the L2 array size */
	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;
	case 0x1:
		/* Older parts interpret the same encoding as half the size
		 * of the newer (8548-class) parts. */
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("128 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 Kbyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts ("512 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		/* L2 already on (e.g. enabled by earlier boot stage) */
		puts("already enabled");
		l2srbar = l2cache->l2srbar0;
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		/*
		 * If the L2 SRAM window still points into flash space,
		 * relocate it to the dedicated init address.
		 */
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	/* Backside L2 controlled through SPRs (e500mc-class parts) */
	if ((SVR_SOC_VER(get_svr()) == SVR_P2040) ||
	    (SVR_SOC_VER(get_svr()) == SVR_P2040_E)) {
		puts("N/A\n");
		goto skip_l2;
	}

	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		/* Wait for the enable bit to take effect, then report size */
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
	}

skip_l2:
#else
	puts("disabled\n");
#endif

	enable_cpc();

	/* needs to be in ram since code uses global static vars */
	fsl_serdes_init();

#ifdef CONFIG_SYS_SRIO
	srio_init();
#endif

#if defined(CONFIG_MP)
	setup_mp();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC136
	/* Erratum ESDHC136 workaround: set bit 14 in DCSR register 0x20520 */
	{
		void *p;
		p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
		setbits_be32(p, 1 << (31 - 14));
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of LCRR register to improve the writing
	 * speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);
	isync();
#endif

#ifdef CONFIG_SYS_FSL_USB1_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy1 =
			(void *)CONFIG_SYS_MPC85xx_USB1_PHY_ADDR;
		out_be32(&usb_phy1->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif
#ifdef CONFIG_SYS_FSL_USB2_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy2 =
			(void *)CONFIG_SYS_MPC85xx_USB2_PHY_ADDR;
		out_be32(&usb_phy2->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif

	return 0;
}
477 
478 extern void setup_ivors(void);
479 
480 void arch_preboot_os(void)
481 {
482 	u32 msr;
483 
484 	/*
485 	 * We are changing interrupt offsets and are about to boot the OS so
486 	 * we need to make sure we disable all async interrupts. EE is already
487 	 * disabled by the time we get called.
488 	 */
489 	msr = mfmsr();
490 	msr &= ~(MSR_ME|MSR_CE|MSR_DE);
491 	mtmsr(msr);
492 
493 	setup_ivors();
494 }
495 
496 #if defined(CONFIG_CMD_SATA) && defined(CONFIG_FSL_SATA)
497 int sata_initialize(void)
498 {
499 	if (is_serdes_configured(SATA1) || is_serdes_configured(SATA2))
500 		return __sata_initialize();
501 
502 	return 1;
503 }
504 #endif
505 
/*
 * Late QE (QUICC Engine) init: optionally load the QE firmware from
 * NAND into DDR, then initialize and reset the QE.
 *
 * Fix vs. original: the NAND result check was `ret && ret == -EUCLEAN`,
 * which printed a "failed" message only for -EUCLEAN (corrected
 * bit-flips, i.e. the data is actually valid) and silently ignored all
 * genuine read errors.  The condition is inverted to report real
 * failures and accept -EUCLEAN as success.
 */
void cpu_secondary_init_r(void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#ifdef CONFIG_SYS_QE_FW_IN_NAND
	int ret;
	size_t fw_length = CONFIG_SYS_QE_FW_LENGTH;

	/* load QE firmware from NAND flash to DDR first */
	ret = nand_read(&nand_info[0], (loff_t)CONFIG_SYS_QE_FW_IN_NAND,
			&fw_length, (u_char *)CONFIG_SYS_QE_FW_ADDR);

	/*
	 * -EUCLEAN only means correctable bit-flips were fixed up during
	 * the read; the data is good.  Anything else non-zero is a real
	 * read failure worth reporting.
	 */
	if (ret && ret != -EUCLEAN) {
		printf ("NAND read for QE firmware at offset %x failed %d\n",
				CONFIG_SYS_QE_FW_IN_NAND, ret);
	}
#endif
	qe_init(qe_base);
	qe_reset();
#endif
}
527