/*
 * Copyright (C) 2015-2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
 * Copyright (C) 2015 Socionext Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <asm/processor.h>

#include "../init.h"
#include "../soc-info.h"
#include "ddrmphy-regs.h"
#include "umc-regs.h"

#define DRAM_CH_NR	3

enum dram_freq {
	DRAM_FREQ_1866M,
	DRAM_FREQ_2133M,
	DRAM_FREQ_NR,
};

enum dram_size {
	DRAM_SZ_256M,
	DRAM_SZ_512M,
	DRAM_SZ_NR,
};

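/*
 * Per-frequency PHY parameter tables, indexed by enum dram_freq.  The
 * magic values are presumably inherited from the Diag code referenced
 * in the header above.
 */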
static u32 ddrphy_pgcr2[DRAM_FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[DRAM_FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[DRAM_FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[DRAM_FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[DRAM_FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[DRAM_FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[DRAM_FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[DRAM_FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[DRAM_FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[DRAM_FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[DRAM_FREQ_NR] = {0x000002a0, 0x000002a8};

/* dependent on package and board design */
static u32 ddrphy_acbdlr0[DRAM_CH_NR] = {0x0000000c, 0x0000000c, 0x00000009};

static u32 umc_cmdctla[DRAM_FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * ch2 uses a UMC core of a different generation, so its register
 * spec unfortunately differs from that of ch0/ch1.
 */
static u32 umc_cmdctlb_ch01[DRAM_FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[DRAM_FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
static u32 umc_spcctla[DRAM_FREQ_NR][DRAM_SZ_NR] = {
	{0x004A071D, 0x0078071D},
	{0x0055081E, 0x0089081E},
};

static u32 umc_spcctlb[DRAM_FREQ_NR] = {0x00FF000A, 0x00FF000B};
/* ch2 differs again, for reasons only the hardware designers know... */
static u32 umc_flowctla_ch01[DRAM_FREQ_NR] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[DRAM_FREQ_NR] = {0x0800001E, 0x0800001E};

/* DDR multiPHY */
static inline int ddrphy_get_rank(int dx)
{
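	/*
	 * Two byte (DX) lanes are assumed to map to each rank:
	 * DX0/DX1 -> rank 0, DX2/DX3 -> rank 1.
	 */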
	return dx / 2;
}

static void ddrphy_fifo_reset(void __iomem *phy_base)
{
	u32 tmp;

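	/* pulse PHYFRST low, then high, to reset the PHY's FIFO pointers */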
	tmp = readl(phy_base + DMPHY_PGCR0);
	tmp &= ~DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);

	tmp |= DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);
}

static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
{
	u32 tmp;

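	/*
	 * INHVT inhibits the PHY's VT (voltage/temperature) drift
	 * compensation.  When turning it off, poll VTSTOP so the delay
	 * lines are known to be stable before they are touched.
	 */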
	tmp = readl(phy_base + DMPHY_PGCR1);

	if (enable)
		tmp &= ~DMPHY_PGCR1_INHVT;
	else
		tmp |= DMPHY_PGCR1_INHVT;

	writel(tmp, phy_base + DMPHY_PGCR1);

	if (!enable) {
		while (!(readl(phy_base + DMPHY_PGSR1) & DMPHY_PGSR1_VTSTOP))
			cpu_relax();
	}
}

static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
{
	int dx;
	u32 lcdlr1, rdqsd;
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;

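	/*
	 * Bits [15:8] of LCDLR1 hold the read DQS delay (RDQSD) in LCDL
	 * taps (assumed from the multiPHY register layout).  Shift it by
	 * 'step' taps on every byte lane, saturating at 0 and 0xff, with
	 * VT compensation inhibited while the delay is rewritten.
	 */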
	ddrphy_vt_ctrl(phy_base, 0);

	for (dx = 0; dx < nr_dx; dx++) {
		lcdlr1 = readl(dx_base + DMPHY_DX_LCDLR1);
		rdqsd = (lcdlr1 >> 8) & 0xff;
		/* use signed arithmetic so a negative step cannot wrap */
		rdqsd = clamp((int)rdqsd + step, 0, 0xff);
		lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
		writel(lcdlr1, dx_base + DMPHY_DX_LCDLR1);
		readl(dx_base + DMPHY_DX_LCDLR1); /* relax */
		dx_base += DMPHY_DX_STRIDE;
	}

	ddrphy_vt_ctrl(phy_base, 1);
}

static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
{
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;
	const int nr_dx = width / 8;
	int dx, rank;
	u32 gtr;
	int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;

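	/*
	 * Each DX GTR register packs a 3-bit DQS gating system latency
	 * (DGSL) per rank.  Take the maximum across the trained lanes
	 * and warn if they are not all leveled.
	 */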
	for (dx = 0; dx < nr_dx; dx++) {
		gtr = readl(dx_base + DMPHY_DX_GTR);
		for (rank = 0; rank < 4; rank++) {
			dgsl = gtr & 0x7;
			/* if dgsl is zero, this rank was not trained. skip. */
			if (dgsl) {
				dgsl_min = min(dgsl_min, dgsl);
				dgsl_max = max(dgsl_max, dgsl);
			}
			gtr >>= 3;
		}
		dx_base += DMPHY_DX_STRIDE;
	}

	if (dgsl_min != dgsl_max)
		printf("DQS Gating System Latencies are not all leveled.\n");

	return dgsl_max;
}

static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width,
			int ch)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	nr_dx = width / 8;

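	/* bypass ZQ calibration for now; it is kicked off later as its own step */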
	writel(DMPHY_PIR_ZCALBYP,        phy_base + DMPHY_PIR);
	/*
	 * Disable RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read error issue.
	 */
	writel(0x07d81e37,         phy_base + DMPHY_PGCR0);
	writel(0x0200c4e0,         phy_base + DMPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	if (width >= 32)
		tmp |= DMPHY_PGCR2_DUALCHN | DMPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + DMPHY_PGCR2);

	writel(ddrphy_ptr0[freq],  phy_base + DMPHY_PTR0);
	writel(ddrphy_ptr1[freq],  phy_base + DMPHY_PTR1);
	writel(0x00083def,         phy_base + DMPHY_PTR2);
	writel(ddrphy_ptr3[freq],  phy_base + DMPHY_PTR3);
	writel(ddrphy_ptr4[freq],  phy_base + DMPHY_PTR4);

	writel(ddrphy_acbdlr0[ch], phy_base + DMPHY_ACBDLR0);

	writel(0x55555555, phy_base + DMPHY_ACIOCR1);
	writel(0x00000000, phy_base + DMPHY_ACIOCR2);
	writel(0x55555555, phy_base + DMPHY_ACIOCR3);
	writel(0x00000000, phy_base + DMPHY_ACIOCR4);
	writel(0x00000055, phy_base + DMPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + DMPHY_DXCCR);

	writel(0x0024641e, phy_base + DMPHY_DSGCR);
	writel(0x0000040b, phy_base + DMPHY_DCR);
	writel(ddrphy_dtpr0[freq], phy_base + DMPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + DMPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + DMPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + DMPHY_DTPR3);
	writel(ddrphy_mr0[freq], phy_base + DMPHY_MR0);
	writel(0x00000006,       phy_base + DMPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + DMPHY_MR2);
	writel(0x00000000,       phy_base + DMPHY_MR3);

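	/* enable data training (RANKEN) only for the ranks that actually exist */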
	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(DMPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + DMPHY_DTCR);

	writel(0x00000000, phy_base + DMPHY_DTAR0);
	writel(0x00000008, phy_base + DMPHY_DTAR1);
	writel(0x00000010, phy_base + DMPHY_DTAR2);
	writel(0x00000018, phy_base + DMPHY_DTAR3);
	writel(0xdd22ee11, phy_base + DMPHY_DTDR0);
	writel(0x7788bb44, phy_base + DMPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + DMPHY_ZQCR);

	zq_base = phy_base + DMPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + DMPHY_ZQ_PR);
		zq_base += DMPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	dx_base = phy_base + DMPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		tmp = readl(dx_base + DMPHY_DX_GCR0);
		tmp &= ~DMPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(DMPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
						DMPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + DMPHY_DX_GCR0);

		writel(0x00000000, dx_base + DMPHY_DX_GCR1);
		writel(0x00000000, dx_base + DMPHY_DX_GCR2);
		writel(0x00000000, dx_base + DMPHY_DX_GCR3);
		dx_base += DMPHY_DX_STRIDE;
	}

	while (!(readl(phy_base + DMPHY_PGSR0) & DMPHY_PGSR0_IDONE))
		cpu_relax();

	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}

struct ddrphy_init_sequence {
	char *description;
	u32 init_flag;
	u32 done_flag;
	u32 err_flag;
};

static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		DMPHY_PIR_ZCAL,
		DMPHY_PGSR0_ZCDONE,
		DMPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		DMPHY_PIR_DRAMRST | DMPHY_PIR_DRAMINIT,
		DMPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		DMPHY_PIR_WL,
		DMPHY_PGSR0_WLDONE,
		DMPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		DMPHY_PIR_QSGATE,
		DMPHY_PGSR0_QSGDONE,
		DMPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		DMPHY_PIR_WLADJ,
		DMPHY_PGSR0_WLADONE,
		DMPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		DMPHY_PIR_RDDSKW,
		DMPHY_PGSR0_RDDONE,
		DMPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		DMPHY_PIR_WRDSKW,
		DMPHY_PGSR0_WDDONE,
		DMPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		DMPHY_PIR_RDEYE,
		DMPHY_PGSR0_REDONE,
		DMPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		DMPHY_PIR_WREYE,
		DMPHY_PGSR0_WEDONE,
		DMPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};

static int __ddrphy_training(void __iomem *phy_base,
			     const struct ddrphy_init_sequence *seq)
{
	const struct ddrphy_init_sequence *s;
	u32 pgsr0;
	u32 init_flag = DMPHY_PIR_INIT;
	u32 done_flag = DMPHY_PGSR0_IDONE;
	int timeout = 50000; /* 50 msec is long enough */
#ifdef DISPLAY_ELAPSED_TIME
	ulong start = get_timer(0);
#endif

	for (s = seq; s->description; s++) {
		init_flag |= s->init_flag;
		done_flag |= s->done_flag;
	}

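	/*
	 * Trigger all the requested steps with a single PIR write, then
	 * poll PGSR0 until every corresponding done flag is set.
	 */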
	writel(init_flag, phy_base + DMPHY_PIR);

	do {
		if (--timeout < 0) {
			pr_err("%s: error: timeout during DDR training\n",
			       __func__);
			return -ETIMEDOUT;
		}
		udelay(1);
		pgsr0 = readl(phy_base + DMPHY_PGSR0);
	} while ((pgsr0 & done_flag) != done_flag);

	for (s = seq; s->description; s++) {
		if (pgsr0 & s->err_flag) {
			pr_err("%s: error: %s failed\n", __func__,
			       s->description);
			return -EIO;
		}
	}

#ifdef DISPLAY_ELAPSED_TIME
	printf("%s: info: elapsed time %ld msec\n", __func__,
	       get_timer(start));
#endif

	return 0;
}

static int ddrphy_impedance_calibration(void __iomem *phy_base)
{
	int ret;
	u32 tmp;

	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
	if (ret)
		return ret;

	/*
	 * Because of a hardware bug, the IDONE flag is set as soon as the
	 * first ZQ block is calibrated.  The flag does not guarantee
	 * completion for all the ZQ blocks.  Wait a little more just in case.
	 */
	udelay(1);

	/* reflect ZQ settings and enable average algorithm */
	tmp = readl(phy_base + DMPHY_ZQCR);
	tmp |= DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	writel(tmp, phy_base + DMPHY_ZQCR);
	tmp &= ~DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	tmp |= DMPHY_ZQCR_AVGEN;
	writel(tmp, phy_base + DMPHY_ZQCR);

	return 0;
}

static int ddrphy_dram_init(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, dram_init_sequence);
}

static int ddrphy_training(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, training_sequence);
}

/* UMC */
static void umc_set_system_latency(void __iomem *dc_base, int phy_latency)
{
	u32 val;
	int latency;

	val = readl(dc_base + UMC_RDATACTL_D0);
	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
						UMC_RDATACTL_RAD2LTY_SHIFT;
	/*
	 * The UMC runs at half the clock rate of the PHY, so the LSB of
	 * the PHY latency is ignored.
	 */
	latency += phy_latency & ~1;

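	/*
	 * RADLTY saturates at 0xf and the excess spills into RAD2LTY;
	 * e.g. latency = 18 gives RADLTY = 0xf, RAD2LTY = 3.
	 */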
	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, dc_base + UMC_RDATACTL_D0);
	writel(val, dc_base + UMC_RDATACTL_D1);

	readl(dc_base + UMC_RDATACTL_D1); /* relax */
}

/* enable/disable auto refresh */
void umc_refresh_ctrl(void __iomem *dc_base, int enable)
{
	u32 tmp;

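	/* AREFMD selects arbiter-driven (auto) vs. register-driven refresh */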
	tmp = readl(dc_base + UMC_SPCSETB);
	tmp &= ~UMC_SPCSETB_AREFMD_MASK;

	if (enable)
		tmp |= UMC_SPCSETB_AREFMD_ARB;
	else
		tmp |= UMC_SPCSETB_AREFMD_REG;

	writel(tmp, dc_base + UMC_SPCSETB);
	udelay(1);
}

static void umc_ud_init(void __iomem *umc_base, int ch)
{
	writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);

	if (ch == 2)
		writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
}

static int umc_dc_init(void __iomem *dc_base, enum dram_freq freq,
		       unsigned long size, int width, int ch)
{
	enum dram_size size_e;
	int latency;
	u32 val;

	switch (size) {
	case 0:
		return 0;
	case SZ_256M:
		size_e = DRAM_SZ_256M;
		break;
	case SZ_512M:
		size_e = DRAM_SZ_512M;
		break;
	default:
		pr_err("unsupported DRAM size 0x%08lx (per 16-bit) for ch%d\n",
		       size, ch);
		return -EINVAL;
	}

	writel(umc_cmdctla[freq], dc_base + UMC_CMDCTLA);

	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       dc_base + UMC_CMDCTLB);

	writel(umc_spcctla[freq][size_e], dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], dc_base + UMC_SPCCTLB);

	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF into the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;

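	/* same RADLTY/RAD2LTY split as in umc_set_system_latency() */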
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		writel(val, dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, dc_base + UMC_DATASET);
	writel(0x00400020, dc_base + UMC_DCCGCTL);
	writel(0x00000084, dc_base + UMC_FLOWCTLG);
	writel(0x00000000, dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       dc_base + UMC_FLOWCTLA);

	writel(0x00004400, dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, dc_base + UMC_SPCSETB);
	writel(0x00000520, dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, dc_base + UMC_RESPCTL);

	if (ch != 2) {
		writel(0x00202000, dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, dc_base + UMC_FLOWCTLB);
		writel(0x00000000, dc_base + UMC_BSICMAPSET);
	}

	writel(0x00000000, dc_base + UMC_ERRMASKA);
	writel(0x00000000, dc_base + UMC_ERRMASKB);

	return 0;
}

static int umc_ch_init(void __iomem *umc_ch_base, enum dram_freq freq,
		       unsigned long size, unsigned int width, int ch)
{
	void __iomem *dc_base = umc_ch_base + 0x00011000;
	void __iomem *phy_base = umc_ch_base + 0x00030000;
	int ret;

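	/*
	 * Bring-up order: reset the controller, release the PHY resets,
	 * configure the PHY, calibrate impedance, initialize the DRAM,
	 * program the controller, then run the PHY training sequence.
	 */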
	writel(0x00000002, dc_base + UMC_INITSET);
	while (readl(dc_base + UMC_INITSTAT) & BIT(2))
		cpu_relax();

	/* deassert PHY reset signals */
	writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
	       dc_base + UMC_DIOCTLA);

	ddrphy_init(phy_base, freq, width, ch);

	ret = ddrphy_impedance_calibration(phy_base);
	if (ret)
		return ret;

	ret = ddrphy_dram_init(phy_base);
	if (ret)
		return ret;

	ret = umc_dc_init(dc_base, freq, size, width, ch);
	if (ret)
		return ret;

	umc_ud_init(umc_ch_base, ch);

	ret = ddrphy_training(phy_base);
	if (ret)
		return ret;

	udelay(1);

	/* match the system latency between UMC and PHY */
	umc_set_system_latency(dc_base,
			       ddrphy_get_system_latency(phy_base, width));

	udelay(1);

	/* stop auto refresh before clearing FIFO in PHY */
	umc_refresh_ctrl(dc_base, 0);
	ddrphy_fifo_reset(phy_base);
	umc_refresh_ctrl(dc_base, 1);

	udelay(10);

	return 0;
}

static void um_init(void __iomem *um_base)
{
	writel(0x000000ff, um_base + UMC_MBUS0);
	writel(0x000000ff, um_base + UMC_MBUS1);
	writel(0x000000ff, um_base + UMC_MBUS2);
	writel(0x000000ff, um_base + UMC_MBUS3);
}

int uniphier_pxs2_umc_init(const struct uniphier_board_data *bd)
{
	void __iomem *um_base = (void __iomem *)0x5b600000;
	void __iomem *umc_ch_base = (void __iomem *)0x5b800000;
	enum dram_freq freq;
	int ch, ret;

	switch (bd->dram_freq) {
	case 1866:
		freq = DRAM_FREQ_1866M;
		break;
	case 2133:
		freq = DRAM_FREQ_2133M;
		break;
	default:
		pr_err("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
		return -EINVAL;
	}

	for (ch = 0; ch < bd->dram_nr_ch; ch++) {
		unsigned long size = bd->dram_ch[ch].size;
		unsigned int width = bd->dram_ch[ch].width;

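		/*
		 * umc_dc_init() takes the size per 16-bit sub-channel;
		 * a 32-bit wide channel consists of two of them.
		 */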
		ret = umc_ch_init(umc_ch_base, freq, size / (width / 16),
				  width, ch);
		if (ret) {
			pr_err("failed to initialize UMC ch%d\n", ch);
			return ret;
		}

		umc_ch_base += 0x00200000;
	}

	um_init(um_base);

	return 0;
}