1 /*
2  * Copyright (C) 2015-2017 Socionext Inc.
3  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
4  *
5  * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
6  * Copyright (C) 2015 Socionext Inc.
7  *
8  * SPDX-License-Identifier:	GPL-2.0+
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/io.h>
14 #include <linux/printk.h>
15 #include <linux/sizes.h>
16 #include <asm/processor.h>
17 #include <time.h>
18 
19 #include "../init.h"
20 #include "../soc-info.h"
21 #include "ddrmphy-regs.h"
22 #include "umc-regs.h"
23 
/* number of DRAM channels handled by this driver */
#define DRAM_CH_NR	3

/* supported DRAM frequency grades; used as index into the parameter tables */
enum dram_freq {
	DRAM_FREQ_1866M,
	DRAM_FREQ_2133M,
	DRAM_FREQ_NR,
};

/* supported DRAM sizes (per 16-bit unit); index into umc_spcctla[][] */
enum dram_size {
	DRAM_SZ_256M,
	DRAM_SZ_512M,
	DRAM_SZ_NR,
};
37 
/* PHY */
/* frequency-dependent DDR-PHY register values, indexed by enum dram_freq */
static u32 ddrphy_pgcr2[DRAM_FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[DRAM_FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[DRAM_FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[DRAM_FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[DRAM_FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[DRAM_FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[DRAM_FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[DRAM_FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[DRAM_FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[DRAM_FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[DRAM_FREQ_NR] = {0x000002a0, 0x000002a8};

/* dependent on package and board design */
static u32 ddrphy_acbdlr0[DRAM_CH_NR] = {0x0000000c, 0x0000000c, 0x00000009};
53 
54 /* DDR multiPHY */
/* Map a DATX8 (byte lane) index to its rank: each rank spans two lanes. */
static inline int ddrphy_get_rank(int dx)
{
	const int lanes_per_rank = 2;

	return dx / lanes_per_rank;
}
59 
/*
 * Pulse the PHY FIFO reset: clear the PHYFRST bit in PGCR0, wait 1 usec,
 * then set it again and wait another 1 usec for the reset to take effect.
 */
static void ddrphy_fifo_reset(void __iomem *phy_base)
{
	u32 tmp;

	tmp = readl(phy_base + MPHY_PGCR0);
	tmp &= ~MPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + MPHY_PGCR0);

	udelay(1);

	tmp |= MPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + MPHY_PGCR0);

	udelay(1);
}
75 
/*
 * Enable or inhibit the PHY VT (voltage/temperature) delay compensation.
 *
 * @enable: non-zero to enable compensation, zero to inhibit it.
 *
 * When inhibiting, poll PGSR1 until the PHY reports VTSTOP so that the
 * delay-line registers can be modified safely afterwards.
 */
static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
{
	u32 tmp;

	tmp = readl(phy_base + MPHY_PGCR1);

	if (enable)
		tmp &= ~MPHY_PGCR1_INHVT;
	else
		tmp |= MPHY_PGCR1_INHVT;

	writel(tmp, phy_base + MPHY_PGCR1);

	if (!enable) {
		while (!(readl(phy_base + MPHY_PGSR1) & MPHY_PGSR1_VTSTOP))
			cpu_relax();
	}
}
94 
95 static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
96 {
97 	int dx;
98 	u32 lcdlr1, rdqsd;
99 	void __iomem *dx_base = phy_base + MPHY_DX_BASE;
100 
101 	ddrphy_vt_ctrl(phy_base, 0);
102 
103 	for (dx = 0; dx < nr_dx; dx++) {
104 		lcdlr1 = readl(dx_base + MPHY_DX_LCDLR1);
105 		rdqsd = (lcdlr1 >> 8) & 0xff;
106 		rdqsd = clamp(rdqsd + step, 0U, 0xffU);
107 		lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
108 		writel(lcdlr1, dx_base + MPHY_DX_LCDLR1);
109 		readl(dx_base + MPHY_DX_LCDLR1); /* relax */
110 		dx_base += MPHY_DX_STRIDE;
111 	}
112 
113 	ddrphy_vt_ctrl(phy_base, 1);
114 }
115 
116 static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
117 {
118 	void __iomem *dx_base = phy_base + MPHY_DX_BASE;
119 	const int nr_dx = width / 8;
120 	int dx, rank;
121 	u32 gtr;
122 	int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;
123 
124 	for (dx = 0; dx < nr_dx; dx++) {
125 		gtr = readl(dx_base + MPHY_DX_GTR);
126 		for (rank = 0; rank < 4; rank++) {
127 			dgsl = gtr & 0x7;
128 			/* if dgsl is zero, this rank was not trained. skip. */
129 			if (dgsl) {
130 				dgsl_min = min(dgsl_min, dgsl);
131 				dgsl_max = max(dgsl_max, dgsl);
132 			}
133 			gtr >>= 3;
134 		}
135 		dx_base += MPHY_DX_STRIDE;
136 	}
137 
138 	if (dgsl_min != dgsl_max)
139 		pr_warn("DQS Gateing System Latencies are not all leveled.\n");
140 
141 	return dgsl_max;
142 }
143 
/*
 * Program the static DDR-PHY configuration for one channel: timing
 * parameters, mode registers, data-training setup, impedance control,
 * and per-DATX8 settings.  Waits for the PHY to report IDONE, then
 * applies the read DQS delay fixup.
 *
 * @phy_base: DDR-PHY register base
 * @freq:     DRAM frequency grade (index into the parameter tables)
 * @width:    bus width in bits (>= 32 enables dual-channel PHY mode)
 * @ch:       channel number (selects the board-dependent AC delay)
 */
static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width,
			int ch)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	nr_dx = width / 8;

	writel(MPHY_PIR_ZCALBYP, phy_base + MPHY_PIR);
	/*
	 * Disable RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read error issue.
	 */
	writel(0x07d81e37, phy_base + MPHY_PGCR0);
	writel(0x0200c4e0, phy_base + MPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	if (width >= 32)
		tmp |= MPHY_PGCR2_DUALCHN | MPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + MPHY_PGCR2);

	/* PHY timing parameter registers */
	writel(ddrphy_ptr0[freq], phy_base + MPHY_PTR0);
	writel(ddrphy_ptr1[freq], phy_base + MPHY_PTR1);
	writel(0x00083def, phy_base + MPHY_PTR2);
	writel(ddrphy_ptr3[freq], phy_base + MPHY_PTR3);
	writel(ddrphy_ptr4[freq], phy_base + MPHY_PTR4);

	/* board/package-dependent AC bit delay */
	writel(ddrphy_acbdlr0[ch], phy_base + MPHY_ACBDLR0);

	writel(0x55555555, phy_base + MPHY_ACIOCR1);
	writel(0x00000000, phy_base + MPHY_ACIOCR2);
	writel(0x55555555, phy_base + MPHY_ACIOCR3);
	writel(0x00000000, phy_base + MPHY_ACIOCR4);
	writel(0x00000055, phy_base + MPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + MPHY_DXCCR);

	/* DRAM timing parameters and mode registers */
	writel(0x0024641e, phy_base + MPHY_DSGCR);
	writel(0x0000040b, phy_base + MPHY_DCR);
	writel(ddrphy_dtpr0[freq], phy_base + MPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + MPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + MPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + MPHY_DTPR3);
	writel(ddrphy_mr0[freq], phy_base + MPHY_MR0);
	writel(0x00000006, phy_base + MPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + MPHY_MR2);
	writel(0x00000000, phy_base + MPHY_MR3);

	/* enable data training only for the ranks actually populated */
	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(MPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + MPHY_DTCR);

	/* data-training address/data patterns */
	writel(0x00000000, phy_base + MPHY_DTAR0);
	writel(0x00000008, phy_base + MPHY_DTAR1);
	writel(0x00000010, phy_base + MPHY_DTAR2);
	writel(0x00000018, phy_base + MPHY_DTAR3);
	writel(0xdd22ee11, phy_base + MPHY_DTDR0);
	writel(0x7788bb44, phy_base + MPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + MPHY_ZQCR);

	zq_base = phy_base + MPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + MPHY_ZQ_PR);
		zq_base += MPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	/* NOTE(review): all 4 DATX8 blocks are set up regardless of nr_dx —
	 * presumably intentional; confirm against the PHY spec. */
	dx_base = phy_base + MPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		tmp = readl(dx_base + MPHY_DX_GCR0);
		tmp &= ~MPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(MPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
						MPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + MPHY_DX_GCR0);

		writel(0x00000000, dx_base + MPHY_DX_GCR1);
		writel(0x00000000, dx_base + MPHY_DX_GCR2);
		writel(0x00000000, dx_base + MPHY_DX_GCR3);
		dx_base += MPHY_DX_STRIDE;
	}

	/* wait for the PHY initialization triggered above to complete */
	while (!(readl(phy_base + MPHY_PGSR0) & MPHY_PGSR0_IDONE))
		cpu_relax();

	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}
238 
/* one PHY initialization/training step: PIR trigger and PGSR0 status bits */
struct ddrphy_init_sequence {
	char *description;	/* step name for error messages; NULL = end of array */
	u32 init_flag;		/* bit(s) set in MPHY_PIR to kick the step */
	u32 done_flag;		/* MPHY_PGSR0 bit signalling completion */
	u32 err_flag;		/* MPHY_PGSR0 bit signalling failure (0 = none) */
};
245 
/* single-step sequence: ZQ impedance calibration */
static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		MPHY_PIR_ZCAL,
		MPHY_PGSR0_ZCDONE,
		MPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};
255 
/* single-step sequence: DRAM reset + initialization (no error flag) */
static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		MPHY_PIR_DRAMRST | MPHY_PIR_DRAMINIT,
		MPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};
265 
/* full data-training sequence, run in array order by __ddrphy_training() */
static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		MPHY_PIR_WL,
		MPHY_PGSR0_WLDONE,
		MPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		MPHY_PIR_QSGATE,
		MPHY_PGSR0_QSGDONE,
		MPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		MPHY_PIR_WLADJ,
		MPHY_PGSR0_WLADONE,
		MPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		MPHY_PIR_RDDSKW,
		MPHY_PGSR0_RDDONE,
		MPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		MPHY_PIR_WRDSKW,
		MPHY_PGSR0_WDDONE,
		MPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		MPHY_PIR_RDEYE,
		MPHY_PGSR0_REDONE,
		MPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		MPHY_PIR_WREYE,
		MPHY_PGSR0_WEDONE,
		MPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};
311 
/*
 * Kick all steps of @seq at once via MPHY_PIR and wait for every step's
 * done flag (plus IDONE) to assert in MPHY_PGSR0.
 *
 * Returns 0 on success, -ETIMEDOUT if the flags do not all assert within
 * ~50 msec, or -EIO if any step's error flag is set afterwards.
 */
static int __ddrphy_training(void __iomem *phy_base,
			     const struct ddrphy_init_sequence *seq)
{
	const struct ddrphy_init_sequence *s;
	u32 pgsr0;
	u32 init_flag = MPHY_PIR_INIT;
	u32 done_flag = MPHY_PGSR0_IDONE;
	int timeout = 50000; /* 50 msec is long enough */
	unsigned long start = 0;

#ifdef DEBUG
	start = get_timer(0);
#endif

	/* OR together the trigger and completion bits of all steps */
	for (s = seq; s->description; s++) {
		init_flag |= s->init_flag;
		done_flag |= s->done_flag;
	}

	writel(init_flag, phy_base + MPHY_PIR);

	do {
		if (--timeout < 0) {
			pr_err("%s: error: timeout during DDR training\n",
			       __func__);
			return -ETIMEDOUT;
		}
		udelay(1);
		pgsr0 = readl(phy_base + MPHY_PGSR0);
	} while ((pgsr0 & done_flag) != done_flag);

	/* all steps completed; now check each step's error flag */
	for (s = seq; s->description; s++) {
		if (pgsr0 & s->err_flag) {
			pr_err("%s: error: %s failed\n", __func__,
			       s->description);
			return -EIO;
		}
	}

	pr_debug("DDRPHY training: elapsed time %ld msec\n", get_timer(start));

	return 0;
}
355 
/*
 * Run the ZQ impedance calibration sequence, then force a ZCAL VT update
 * and enable the averaging algorithm in MPHY_ZQCR.
 *
 * Returns 0 on success or a negative error code from __ddrphy_training().
 */
static int ddrphy_impedance_calibration(void __iomem *phy_base)
{
	int ret;
	u32 tmp;

	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
	if (ret)
		return ret;

	/*
	 * Because of a hardware bug, IDONE flag is set when the first ZQ block
	 * is calibrated.  The flag does not guarantee the completion for all
	 * the ZQ blocks.  Wait a little more just in case.
	 */
	udelay(1);

	/* reflect ZQ settings and enable average algorithm*/
	tmp = readl(phy_base + MPHY_ZQCR);
	tmp |= MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	writel(tmp, phy_base + MPHY_ZQCR);
	tmp &= ~MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	tmp |= MPHY_ZQCR_AVGEN;
	writel(tmp, phy_base + MPHY_ZQCR);

	return 0;
}
382 
/* Run the DRAM reset/initialization sequence.  Returns 0 or negative errno. */
static int ddrphy_dram_init(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, dram_init_sequence);
}
387 
/* Run the full data-training sequence.  Returns 0 or negative errno. */
static int ddrphy_training(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, training_sequence);
}
392 
393 /* UMC */
/* frequency-dependent UMC register values, indexed by enum dram_freq */
static u32 umc_cmdctla[DRAM_FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * The ch2 is a different generation UMC core.
 * The register spec is different, unfortunately.
 */
static u32 umc_cmdctlb_ch01[DRAM_FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[DRAM_FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
/* indexed by [enum dram_freq][enum dram_size] */
static u32 umc_spcctla[DRAM_FREQ_NR][DRAM_SZ_NR] = {
	{0x004A071D, 0x0078071D},
	{0x0055081E, 0x0089081E},
};

static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B};
/* The ch2 is different for some reason only hardware guys know... */
static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E};
410 
/*
 * Add the PHY-measured DQS gating system latency to the UMC read-data
 * latency.  The total is split into RADLTY (capped at 0xf) plus RAD2LTY
 * for any overflow, and written to both D0 and D1 RDATACTL registers.
 */
static void umc_set_system_latency(void __iomem *dc_base, int phy_latency)
{
	u32 val;
	int latency;

	/* current total latency = RADLTY + RAD2LTY */
	val = readl(dc_base + UMC_RDATACTL_D0);
	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
						UMC_RDATACTL_RAD2LTY_SHIFT;
	/*
	 * UMC works at the half clock rate of the PHY.
	 * The LSB of latency is ignored
	 */
	latency += phy_latency & ~1;

	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, dc_base + UMC_RDATACTL_D0);
	writel(val, dc_base + UMC_RDATACTL_D1);

	readl(dc_base + UMC_RDATACTL_D1); /* relax */
}
439 
440 /* enable/disable auto refresh */
441 static void umc_refresh_ctrl(void __iomem *dc_base, int enable)
442 {
443 	u32 tmp;
444 
445 	tmp = readl(dc_base + UMC_SPCSETB);
446 	tmp &= ~UMC_SPCSETB_AREFMD_MASK;
447 
448 	if (enable)
449 		tmp |= UMC_SPCSETB_AREFMD_ARB;
450 	else
451 		tmp |= UMC_SPCSETB_AREFMD_REG;
452 
453 	writel(tmp, dc_base + UMC_SPCSETB);
454 	udelay(1);
455 }
456 
/*
 * Channel-level UMC settings programmed at the UMC (not DC) register base.
 * ch2 additionally needs PAIR1DOFF — presumably due to its different UMC
 * core generation (see umc_cmdctlb_ch2 comment); confirm with hw spec.
 */
static void umc_ud_init(void __iomem *umc_base, int ch)
{
	writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);

	if (ch == 2)
		writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
}
464 
/*
 * Program the UMC DRAM controller (DC) block for one channel.
 *
 * @dc_base: DC register base
 * @freq:    DRAM frequency grade
 * @size:    DRAM size per 16-bit unit; 0 means the channel is unused
 * @width:   bus width in bits (D1 registers are written only for >= 32)
 * @ch:      channel number (ch2 uses different register values)
 *
 * Returns 0 on success (including the size==0 no-op case), or -EINVAL
 * for an unsupported DRAM size.
 */
static int umc_dc_init(void __iomem *dc_base, enum dram_freq freq,
		       unsigned long size, int width, int ch)
{
	enum dram_size size_e;
	int latency;
	u32 val;

	switch (size) {
	case 0:
		return 0;
	case SZ_256M:
		size_e = DRAM_SZ_256M;
		break;
	case SZ_512M:
		size_e = DRAM_SZ_512M;
		break;
	default:
		pr_err("unsupported DRAM size 0x%08lx (per 16bit) for ch%d\n",
		       size, ch);
		return -EINVAL;
	}

	writel(umc_cmdctla[freq], dc_base + UMC_CMDCTLA);

	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       dc_base + UMC_CMDCTLB);

	writel(umc_spcctla[freq][size_e], dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], dc_base + UMC_SPCCTLB);

	/* read-data latency: base 12, split into RADLTY/RAD2LTY fields */
	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF to the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;

	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		writel(val, dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, dc_base + UMC_DATASET);
	writel(0x00400020, dc_base + UMC_DCCGCTL);
	writel(0x00000084, dc_base + UMC_FLOWCTLG);
	writel(0x00000000, dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       dc_base + UMC_FLOWCTLA);

	writel(0x00004400, dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, dc_base + UMC_SPCSETB);
	writel(0x00000520, dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, dc_base + UMC_RESPCTL);

	/* ch2's different-generation core takes different flow-control setup */
	if (ch != 2) {
		writel(0x00202000, dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, dc_base + UMC_FLOWCTLB);
		writel(0x00000000, dc_base + UMC_BSICMAPSET);
	}

	/* unmask all error reporting */
	writel(0x00000000, dc_base + UMC_ERRMASKA);
	writel(0x00000000, dc_base + UMC_ERRMASKB);

	return 0;
}
543 
544 static int umc_ch_init(void __iomem *umc_ch_base, enum dram_freq freq,
545 		       unsigned long size, unsigned int width, int ch)
546 {
547 	void __iomem *dc_base = umc_ch_base + 0x00011000;
548 	void __iomem *phy_base = umc_ch_base + 0x00030000;
549 	int ret;
550 
551 	writel(0x00000002, dc_base + UMC_INITSET);
552 	while (readl(dc_base + UMC_INITSTAT) & BIT(2))
553 		cpu_relax();
554 
555 	/* deassert PHY reset signals */
556 	writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
557 	       dc_base + UMC_DIOCTLA);
558 
559 	ddrphy_init(phy_base, freq, width, ch);
560 
561 	ret = ddrphy_impedance_calibration(phy_base);
562 	if (ret)
563 		return ret;
564 
565 	ddrphy_dram_init(phy_base);
566 	if (ret)
567 		return ret;
568 
569 	ret = umc_dc_init(dc_base, freq, size, width, ch);
570 	if (ret)
571 		return ret;
572 
573 	umc_ud_init(umc_ch_base, ch);
574 
575 	ret = ddrphy_training(phy_base);
576 	if (ret)
577 		return ret;
578 
579 	udelay(1);
580 
581 	/* match the system latency between UMC and PHY */
582 	umc_set_system_latency(dc_base,
583 			       ddrphy_get_system_latency(phy_base, width));
584 
585 	udelay(1);
586 
587 	/* stop auto refresh before clearing FIFO in PHY */
588 	umc_refresh_ctrl(dc_base, 0);
589 	ddrphy_fifo_reset(phy_base);
590 	umc_refresh_ctrl(dc_base, 1);
591 
592 	udelay(10);
593 
594 	return 0;
595 }
596 
/* Program the four UMC MBUS registers; presumably bus arbitration setup —
 * confirm against the UMC spec. */
static void um_init(void __iomem *um_base)
{
	writel(0x000000ff, um_base + UMC_MBUS0);
	writel(0x000000ff, um_base + UMC_MBUS1);
	writel(0x000000ff, um_base + UMC_MBUS2);
	writel(0x000000ff, um_base + UMC_MBUS3);
}
604 
605 int uniphier_pxs2_umc_init(const struct uniphier_board_data *bd)
606 {
607 	void __iomem *um_base = (void __iomem *)0x5b600000;
608 	void __iomem *umc_ch_base = (void __iomem *)0x5b800000;
609 	enum dram_freq freq;
610 	int ch, ret;
611 
612 	switch (bd->dram_freq) {
613 	case 1866:
614 		freq = DRAM_FREQ_1866M;
615 		break;
616 	case 2133:
617 		freq = DRAM_FREQ_2133M;
618 		break;
619 	default:
620 		pr_err("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
621 		return -EINVAL;
622 	}
623 
624 	for (ch = 0; ch < DRAM_CH_NR; ch++) {
625 		unsigned long size = bd->dram_ch[ch].size;
626 		unsigned int width = bd->dram_ch[ch].width;
627 
628 		if (size) {
629 			ret = umc_ch_init(umc_ch_base, freq,
630 					  size / (width / 16), width, ch);
631 			if (ret) {
632 				pr_err("failed to initialize UMC ch%d\n", ch);
633 				return ret;
634 			}
635 		}
636 
637 		umc_ch_base += 0x00200000;
638 	}
639 
640 	um_init(um_base);
641 
642 	return 0;
643 }
644