/*
 * Copyright 2007-2011 Freescale Semiconductor, Inc.
 *
 * (C) Copyright 2003 Motorola Inc.
 * Modified by Xianghua Xiao, X.Xiao@motorola.com
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <watchdog.h>
#include <asm/processor.h>
#include <ioports.h>
#include <sata.h>
#include <asm/io.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/fsl_law.h>
#include <asm/fsl_serdes.h>
#include "mp.h"
#ifdef CONFIG_SYS_QE_FW_IN_NAND
#include <nand.h>
#include <errno.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

extern void srio_init(void);

#ifdef CONFIG_QE
extern qe_iop_conf_t qe_iop_conf_tab[];
extern void qe_config_iopin(u8 port, u8 pin, int dir,
				int open_drain, int assign);
extern void qe_init(uint qe_base);
extern void qe_reset(void);

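/* Configure the QE I/O pins from the board's qe_iop_conf_tab[] table */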
static void config_qe_ioports(void)
{
	u8      port, pin;
	int     dir, open_drain, assign;
	int     i;

	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
		port		= qe_iop_conf_tab[i].port;
		pin		= qe_iop_conf_tab[i].pin;
		dir		= qe_iop_conf_tab[i].dir;
		open_drain	= qe_iop_conf_tab[i].open_drain;
		assign		= qe_iop_conf_tab[i].assign;
		qe_config_iopin(port, pin, dir, open_drain, assign);
	}
}
#endif

#ifdef CONFIG_CPM2
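/*
 * Configure the four CPM2 parallel I/O ports from the board's
 * iop_conf_tab[][] table.
 */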
void config_8560_ioports (volatile ccsr_cpm_t * cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		uint pmsk = 0,
		     ppar = 0,
		     psor = 0,
		     pdir = 0,
		     podr = 0,
		     pdat = 0;
		iop_conf_t *iopc = (iop_conf_t *) & iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr (cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * the (somewhat confused) paragraph at the
			 * bottom of page 35-5 warns that there might
			 * be "unknown behaviour" when programming
			 * PSORx and PDIRx, if PPARx = 1, so I
			 * decided this meant I had to disable the
			 * dedicated function first, and enable it
			 * last.
			 */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			iop->ppar |= ppar;
		}
	}
}
#endif

#ifdef CONFIG_SYS_FSL_CPC
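/*
 * Enable each CoreNet Platform Cache (CPC), apply the configured erratum
 * workarounds, and report the total cache size that was enabled.
 */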
static void enable_cpc(void)
{
	int i;
	u32 size = 0;

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		u32 cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);
#ifdef CONFIG_RAMBOOT_PBL
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
			/* find and disable LAW of SRAM */
			struct law_entry law = find_law(CONFIG_SYS_INIT_L3_ADDR);

			if (law.index == -1) {
				printf("\nFatal error: no LAW found for the L3/CPC SRAM\n");
				return;
			}
			disable_law(law.index);

			clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
			out_be32(&cpc->cpccsr0, 0);
			out_be32(&cpc->cpcsrcr0, 0);
		}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif

		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync write */
		in_be32(&cpc->cpccsr0);

	}

	printf("Corenet Platform Cache: %d KB enabled\n", size);
}

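/*
 * Flash-invalidate every CPC that is not configured entirely as SRAM and
 * clear all cache locks.
 */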
void invalidate_cpc(void)
{
	int i;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		/* skip any CPC that is used entirely as SRAM */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
			continue;
		/* Flash invalidate the CPC and clear all the locks */
		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
			;
	}
}
#else
#define enable_cpc()
#define invalidate_cpc()
#endif /* CONFIG_SYS_FSL_CPC */

/*
 * Breathe some life into the CPU...
 *
 * Set up the memory map
 * initialize a bunch of registers
 */

#ifdef CONFIG_FSL_CORENET
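/* Enable the timebase for the calling core via the RCPM CTBENRL register */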
static void corenet_tb_init(void)
{
	volatile ccsr_rcpm_t *rcpm =
		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
	volatile ccsr_pic_t *pic =
		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
	u32 whoami = in_be32(&pic->whoami);

	/* Enable the timebase register for this core */
	out_be32(&rcpm->ctbenrl, (1 << whoami));
}
#endif

void cpu_init_f (void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 erratum workaround: a core hang is possible while executing
	 * an msync instruction when a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	disable_tlb(14);
	disable_tlb(15);

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

	init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif
#ifdef CONFIG_QE
	/* Config QE ioports */
	config_qe_ioports();
#endif
#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();
}

/* Implement a dummy function for those platforms w/o SERDES */
static void __fsl_serdes__init(void)
{
	return;
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);

/*
 * Initialize L2 as cache.
 *
 * The newer 8548, etc., parts have twice as much cache, but
 * use the same bit-encoding as the older 8555, etc., parts.
 */
int cpu_init_r(void)
{
#ifdef CONFIG_SYS_LBC_LCRR
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
#endif

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22)
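	/* Work around P4080 erratum CPU22: flush the D-cache and set L1CSR2[DCWS] */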
	flush_dcache();
	mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
	sync();
#endif

	puts ("L2:    ");

#if defined(CONFIG_L2_CACHE)
	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
	volatile uint cache_ctl;
	uint svr, ver;
	uint l2srbar;
	u32 l2siz_field;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;
	case 0x1:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("128 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 Kbyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts ("512 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		puts("already enabled");
		l2srbar = l2cache->l2srbar0;
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
	}
#else
	puts("disabled\n");
#endif

	enable_cpc();

	/* needs to run from RAM since the code uses global static vars */
	fsl_serdes_init();

#ifdef CONFIG_SYS_SRIO
	srio_init();
#endif

#if defined(CONFIG_MP)
	setup_mp();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC136
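	/* Set bit 14 (big-endian numbering) in the DCSR register at offset 0x20520 */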
	{
		void *p;
		p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
		setbits_be32(p, 1 << (31 - 14));
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of the LCRR register to improve the write
	 * speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);
	isync();
#endif

#ifdef CONFIG_SYS_FSL_USB1_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy1 =
			(void *)CONFIG_SYS_MPC85xx_USB1_PHY_ADDR;
		out_be32(&usb_phy1->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif
#ifdef CONFIG_SYS_FSL_USB2_PHY_ENABLE
	{
		ccsr_usb_phy_t *usb_phy2 =
			(void *)CONFIG_SYS_MPC85xx_USB2_PHY_ADDR;
		out_be32(&usb_phy2->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif

	return 0;
}

extern void setup_ivors(void);

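/* Mask asynchronous interrupts and set up the IVORs before handing control to the OS */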
void arch_preboot_os(void)
{
	u32 msr;

	/*
	 * We are changing interrupt offsets and are about to boot the OS so
	 * we need to make sure we disable all async interrupts. EE is already
	 * disabled by the time we get called.
	 */
	msr = mfmsr();
	msr &= ~(MSR_ME|MSR_CE|MSR_DE);
	mtmsr(msr);

	setup_ivors();
}

#if defined(CONFIG_CMD_SATA) && defined(CONFIG_FSL_SATA)
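/* Only initialize SATA when one of the SATA SerDes lanes is actually configured */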
int sata_initialize(void)
{
	if (is_serdes_configured(SATA1) || is_serdes_configured(SATA2))
		return __sata_initialize();

	return 1;
}
#endif

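/*
 * Late (post-relocation) QE setup: load the QE firmware from NAND when so
 * configured, then initialize and reset the QE.
 */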
void cpu_secondary_init_r(void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#ifdef CONFIG_SYS_QE_FW_IN_NAND
	int ret;
	size_t fw_length = CONFIG_SYS_QE_FW_LENGTH;

	/* load QE firmware from NAND flash to DDR first */
	ret = nand_read(&nand_info[0], (loff_t)CONFIG_SYS_QE_FW_IN_NAND,
			&fw_length, (u_char *)CONFIG_SYS_QE_FW_ADDR);

	if (ret && ret == -EUCLEAN) {
		printf ("NAND read for QE firmware at offset %x failed %d\n",
				CONFIG_SYS_QE_FW_IN_NAND, ret);
	}
#endif
	qe_init(qe_base);
	qe_reset();
#endif
}