1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015-2017 Socionext Inc.
4  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
5  *
6  * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
7  * Copyright (C) 2015 Socionext Inc.
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/errno.h>
12 #include <linux/io.h>
13 #include <linux/printk.h>
14 #include <linux/sizes.h>
15 #include <asm/processor.h>
16 #include <time.h>
17 
18 #include "../init.h"
19 #include "../soc-info.h"
20 #include "ddrmphy-regs.h"
21 #include "umc-regs.h"
22 
/* number of DRAM channels handled by this driver */
#define DRAM_CH_NR	3

/* supported DRAM operating frequencies; used as indexes into the tables below */
enum dram_freq {
	DRAM_FREQ_1866M,
	DRAM_FREQ_2133M,
	DRAM_FREQ_NR,
};

/* supported DRAM sizes (per 16-bit sub-channel); indexes umc_spcctla[][] */
enum dram_size {
	DRAM_SZ_256M,
	DRAM_SZ_512M,
	DRAM_SZ_NR,
};

/* PHY */
/* per-frequency DDR multiPHY register values (indexed by enum dram_freq) */
static u32 ddrphy_pgcr2[DRAM_FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[DRAM_FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[DRAM_FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[DRAM_FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[DRAM_FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[DRAM_FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[DRAM_FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[DRAM_FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[DRAM_FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[DRAM_FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[DRAM_FREQ_NR] = {0x000002a0, 0x000002a8};

/* dependent on package and board design */
/* per-channel AC bit delay (ACBDLR0), one entry per DRAM channel */
static u32 ddrphy_acbdlr0[DRAM_CH_NR] = {0x0000000c, 0x0000000c, 0x00000009};
52 
53 /* DDR multiPHY */
/* Map a DATX8 (byte lane) index to the rank it belongs to. */
static inline int ddrphy_get_rank(int dx)
{
	const int lanes_per_rank = 2;	/* two byte lanes share one rank */

	return dx / lanes_per_rank;
}
58 
59 static void ddrphy_fifo_reset(void __iomem *phy_base)
60 {
61 	u32 tmp;
62 
63 	tmp = readl(phy_base + MPHY_PGCR0);
64 	tmp &= ~MPHY_PGCR0_PHYFRST;
65 	writel(tmp, phy_base + MPHY_PGCR0);
66 
67 	udelay(1);
68 
69 	tmp |= MPHY_PGCR0_PHYFRST;
70 	writel(tmp, phy_base + MPHY_PGCR0);
71 
72 	udelay(1);
73 }
74 
75 static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
76 {
77 	u32 tmp;
78 
79 	tmp = readl(phy_base + MPHY_PGCR1);
80 
81 	if (enable)
82 		tmp &= ~MPHY_PGCR1_INHVT;
83 	else
84 		tmp |= MPHY_PGCR1_INHVT;
85 
86 	writel(tmp, phy_base + MPHY_PGCR1);
87 
88 	if (!enable) {
89 		while (!(readl(phy_base + MPHY_PGSR1) & MPHY_PGSR1_VTSTOP))
90 			cpu_relax();
91 	}
92 }
93 
94 static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
95 {
96 	int dx;
97 	u32 lcdlr1, rdqsd;
98 	void __iomem *dx_base = phy_base + MPHY_DX_BASE;
99 
100 	ddrphy_vt_ctrl(phy_base, 0);
101 
102 	for (dx = 0; dx < nr_dx; dx++) {
103 		lcdlr1 = readl(dx_base + MPHY_DX_LCDLR1);
104 		rdqsd = (lcdlr1 >> 8) & 0xff;
105 		rdqsd = clamp(rdqsd + step, 0U, 0xffU);
106 		lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
107 		writel(lcdlr1, dx_base + MPHY_DX_LCDLR1);
108 		readl(dx_base + MPHY_DX_LCDLR1); /* relax */
109 		dx_base += MPHY_DX_STRIDE;
110 	}
111 
112 	ddrphy_vt_ctrl(phy_base, 1);
113 }
114 
115 static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
116 {
117 	void __iomem *dx_base = phy_base + MPHY_DX_BASE;
118 	const int nr_dx = width / 8;
119 	int dx, rank;
120 	u32 gtr;
121 	int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;
122 
123 	for (dx = 0; dx < nr_dx; dx++) {
124 		gtr = readl(dx_base + MPHY_DX_GTR);
125 		for (rank = 0; rank < 4; rank++) {
126 			dgsl = gtr & 0x7;
127 			/* if dgsl is zero, this rank was not trained. skip. */
128 			if (dgsl) {
129 				dgsl_min = min(dgsl_min, dgsl);
130 				dgsl_max = max(dgsl_max, dgsl);
131 			}
132 			gtr >>= 3;
133 		}
134 		dx_base += MPHY_DX_STRIDE;
135 	}
136 
137 	if (dgsl_min != dgsl_max)
138 		pr_warn("DQS Gateing System Latencies are not all leveled.\n");
139 
140 	return dgsl_max;
141 }
142 
/*
 * One-time static configuration of the DDR multiPHY for one channel:
 * timing/pointer registers, mode registers, data-training setup,
 * impedance (ZQ) control and per-byte-lane (DATX8) configuration.
 * The register order below follows the vendor init sequence; do not
 * reorder the writes.
 *
 * @phy_base: base of the PHY register block
 * @freq: DRAM frequency index for the parameter tables
 * @width: DRAM bus width in bits
 * @ch: channel number (selects the board-dependent AC delay)
 */
static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width,
			int ch)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	/* one DATX8 (byte lane) block per 8 bits of bus width */
	nr_dx = width / 8;

	/* ZCALBYP: impedance calibration is run separately later */
	writel(MPHY_PIR_ZCALBYP, phy_base + MPHY_PIR);
	/*
	 * Disable RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read error issue.
	 */
	writel(0x07d81e37, phy_base + MPHY_PGCR0);
	writel(0x0200c4e0, phy_base + MPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	/* a 32-bit (or wider) bus uses both channels of the PHY */
	if (width >= 32)
		tmp |= MPHY_PGCR2_DUALCHN | MPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + MPHY_PGCR2);

	/* PHY timer/pointer registers (frequency dependent except PTR2) */
	writel(ddrphy_ptr0[freq], phy_base + MPHY_PTR0);
	writel(ddrphy_ptr1[freq], phy_base + MPHY_PTR1);
	writel(0x00083def, phy_base + MPHY_PTR2);
	writel(ddrphy_ptr3[freq], phy_base + MPHY_PTR3);
	writel(ddrphy_ptr4[freq], phy_base + MPHY_PTR4);

	/* board/package dependent AC bit delay */
	writel(ddrphy_acbdlr0[ch], phy_base + MPHY_ACBDLR0);

	/* AC I/O configuration */
	writel(0x55555555, phy_base + MPHY_ACIOCR1);
	writel(0x00000000, phy_base + MPHY_ACIOCR2);
	writel(0x55555555, phy_base + MPHY_ACIOCR3);
	writel(0x00000000, phy_base + MPHY_ACIOCR4);
	writel(0x00000055, phy_base + MPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + MPHY_DXCCR);

	/* DRAM timing parameters and mode registers */
	writel(0x0024641e, phy_base + MPHY_DSGCR);
	writel(0x0000040b, phy_base + MPHY_DCR);
	writel(ddrphy_dtpr0[freq], phy_base + MPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + MPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + MPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + MPHY_DTPR3);
	writel(ddrphy_mr0[freq], phy_base + MPHY_MR0);
	writel(0x00000006, phy_base + MPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + MPHY_MR2);
	writel(0x00000000, phy_base + MPHY_MR3);

	/* enable data training for every rank that has a byte lane present */
	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(MPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + MPHY_DTCR);

	/* data-training addresses and patterns */
	writel(0x00000000, phy_base + MPHY_DTAR0);
	writel(0x00000008, phy_base + MPHY_DTAR1);
	writel(0x00000010, phy_base + MPHY_DTAR2);
	writel(0x00000018, phy_base + MPHY_DTAR3);
	writel(0xdd22ee11, phy_base + MPHY_DTDR0);
	writel(0x7788bb44, phy_base + MPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + MPHY_ZQCR);

	zq_base = phy_base + MPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + MPHY_ZQ_PR);
		zq_base += MPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	dx_base = phy_base + MPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		/* enable write leveling only for the rank this lane maps to */
		tmp = readl(dx_base + MPHY_DX_GCR0);
		tmp &= ~MPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(MPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
						MPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + MPHY_DX_GCR0);

		writel(0x00000000, dx_base + MPHY_DX_GCR1);
		writel(0x00000000, dx_base + MPHY_DX_GCR2);
		writel(0x00000000, dx_base + MPHY_DX_GCR3);
		dx_base += MPHY_DX_STRIDE;
	}

	/* wait for the PHY initialization (triggered by the PIR write) */
	while (!(readl(phy_base + MPHY_PGSR0) & MPHY_PGSR0_IDONE))
		cpu_relax();

	/* pull the read DQS delay of every lane in by 4 taps */
	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}
237 
/*
 * One step of a PHY-managed initialization/training run, executed by
 * __ddrphy_training().  A sequence is an array terminated by an entry
 * with a NULL description.
 */
struct ddrphy_init_sequence {
	char *description;	/* step name, used in error messages */
	u32 init_flag;		/* PIR bit(s) that trigger the step */
	u32 done_flag;		/* PGSR0 bit set on completion */
	u32 err_flag;		/* PGSR0 bit set on failure (0 = none) */
};

/* single-step sequence: ZQ impedance calibration */
static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		MPHY_PIR_ZCAL,
		MPHY_PGSR0_ZCDONE,
		MPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};

/* single-step sequence: DRAM reset + initialization (no error flag) */
static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		MPHY_PIR_DRAMRST | MPHY_PIR_DRAMINIT,
		MPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};

/* full training run: leveling, gate training, deskew and eye training */
static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		MPHY_PIR_WL,
		MPHY_PGSR0_WLDONE,
		MPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		MPHY_PIR_QSGATE,
		MPHY_PGSR0_QSGDONE,
		MPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		MPHY_PIR_WLADJ,
		MPHY_PGSR0_WLADONE,
		MPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		MPHY_PIR_RDDSKW,
		MPHY_PGSR0_RDDONE,
		MPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		MPHY_PIR_WRDSKW,
		MPHY_PGSR0_WDDONE,
		MPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		MPHY_PIR_RDEYE,
		MPHY_PGSR0_REDONE,
		MPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		MPHY_PIR_WREYE,
		MPHY_PGSR0_WEDONE,
		MPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};
310 
311 static int __ddrphy_training(void __iomem *phy_base,
312 			     const struct ddrphy_init_sequence *seq)
313 {
314 	const struct ddrphy_init_sequence *s;
315 	u32 pgsr0;
316 	u32 init_flag = MPHY_PIR_INIT;
317 	u32 done_flag = MPHY_PGSR0_IDONE;
318 	int timeout = 50000; /* 50 msec is long enough */
319 	unsigned long start = 0;
320 
321 #ifdef DEBUG
322 	start = get_timer(0);
323 #endif
324 
325 	for (s = seq; s->description; s++) {
326 		init_flag |= s->init_flag;
327 		done_flag |= s->done_flag;
328 	}
329 
330 	writel(init_flag, phy_base + MPHY_PIR);
331 
332 	do {
333 		if (--timeout < 0) {
334 			pr_err("%s: error: timeout during DDR training\n",
335 			       __func__);
336 			return -ETIMEDOUT;
337 		}
338 		udelay(1);
339 		pgsr0 = readl(phy_base + MPHY_PGSR0);
340 	} while ((pgsr0 & done_flag) != done_flag);
341 
342 	for (s = seq; s->description; s++) {
343 		if (pgsr0 & s->err_flag) {
344 			pr_err("%s: error: %s failed\n", __func__,
345 			       s->description);
346 			return -EIO;
347 		}
348 	}
349 
350 	pr_debug("DDRPHY training: elapsed time %ld msec\n", get_timer(start));
351 
352 	return 0;
353 }
354 
355 static int ddrphy_impedance_calibration(void __iomem *phy_base)
356 {
357 	int ret;
358 	u32 tmp;
359 
360 	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
361 	if (ret)
362 		return ret;
363 
364 	/*
365 	 * Because of a hardware bug, IDONE flag is set when the first ZQ block
366 	 * is calibrated.  The flag does not guarantee the completion for all
367 	 * the ZQ blocks.  Wait a little more just in case.
368 	 */
369 	udelay(1);
370 
371 	/* reflect ZQ settings and enable average algorithm*/
372 	tmp = readl(phy_base + MPHY_ZQCR);
373 	tmp |= MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
374 	writel(tmp, phy_base + MPHY_ZQCR);
375 	tmp &= ~MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
376 	tmp |= MPHY_ZQCR_AVGEN;
377 	writel(tmp, phy_base + MPHY_ZQCR);
378 
379 	return 0;
380 }
381 
/* Run the DRAM reset/initialization sequence through the PHY. */
static int ddrphy_dram_init(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, dram_init_sequence);
}
386 
/* Run the full leveling/deskew/eye training sequence through the PHY. */
static int ddrphy_training(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, training_sequence);
}
391 
/* UMC */
/* per-frequency UMC register values (indexed by enum dram_freq) */
static u32 umc_cmdctla[DRAM_FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * The ch2 is a different generation UMC core.
 * The register spec is different, unfortunately.
 */
static u32 umc_cmdctlb_ch01[DRAM_FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[DRAM_FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
/* indexed by [frequency][size]; size is per 16-bit sub-channel */
static u32 umc_spcctla[DRAM_FREQ_NR][DRAM_SZ_NR] = {
	{0x004A071D, 0x0078071D},
	{0x0055081E, 0x0089081E},
};

/* the remaining tables are also indexed by enum dram_freq */
static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B};
/* The ch2 is different for some reason only hardware guys know... */
static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E};
409 
410 static void umc_set_system_latency(void __iomem *dc_base, int phy_latency)
411 {
412 	u32 val;
413 	int latency;
414 
415 	val = readl(dc_base + UMC_RDATACTL_D0);
416 	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
417 	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
418 						UMC_RDATACTL_RAD2LTY_SHIFT;
419 	/*
420 	 * UMC works at the half clock rate of the PHY.
421 	 * The LSB of latency is ignored
422 	 */
423 	latency += phy_latency & ~1;
424 
425 	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
426 	if (latency > 0xf) {
427 		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
428 		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
429 	} else {
430 		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
431 	}
432 
433 	writel(val, dc_base + UMC_RDATACTL_D0);
434 	writel(val, dc_base + UMC_RDATACTL_D1);
435 
436 	readl(dc_base + UMC_RDATACTL_D1); /* relax */
437 }
438 
439 /* enable/disable auto refresh */
440 static void umc_refresh_ctrl(void __iomem *dc_base, int enable)
441 {
442 	u32 tmp;
443 
444 	tmp = readl(dc_base + UMC_SPCSETB);
445 	tmp &= ~UMC_SPCSETB_AREFMD_MASK;
446 
447 	if (enable)
448 		tmp |= UMC_SPCSETB_AREFMD_ARB;
449 	else
450 		tmp |= UMC_SPCSETB_AREFMD_REG;
451 
452 	writel(tmp, dc_base + UMC_SPCSETB);
453 	udelay(1);
454 }
455 
456 static void umc_ud_init(void __iomem *umc_base, int ch)
457 {
458 	writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);
459 
460 	if (ch == 2)
461 		writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
462 }
463 
/*
 * Program the UMC dynamic controller (DC) timing and flow registers for
 * one channel.  The write order follows the vendor sequence; do not
 * reorder.
 *
 * @dc_base: base of the DC register block
 * @freq: DRAM frequency index for the parameter tables
 * @size: DRAM size per 16-bit sub-channel (0 = channel unused)
 * @width: DRAM bus width in bits
 * @ch: channel number (ch2 is a different-generation UMC core)
 * Return: 0 on success or unused channel, -EINVAL for unsupported sizes
 */
static int umc_dc_init(void __iomem *dc_base, enum dram_freq freq,
		       unsigned long size, int width, int ch)
{
	enum dram_size size_e;
	int latency;
	u32 val;

	switch (size) {
	case 0:
		/* unused channel: nothing to program */
		return 0;
	case SZ_256M:
		size_e = DRAM_SZ_256M;
		break;
	case SZ_512M:
		size_e = DRAM_SZ_512M;
		break;
	default:
		pr_err("unsupported DRAM size 0x%08lx (per 16bit) for ch%d\n",
		       size, ch);
		return -EINVAL;
	}

	writel(umc_cmdctla[freq], dc_base + UMC_CMDCTLA);

	/* ch2 is a different-generation core with a different register spec */
	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       dc_base + UMC_CMDCTLB);

	writel(umc_spcctla[freq][size_e], dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], dc_base + UMC_SPCCTLB);

	/* compute the read-data latency for RDATACTL */
	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF to the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;

	/* RADLTY saturates at 0xf; the excess spills into RAD2LTY */
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	/* D1 registers only exist when the second 16-bit half is in use */
	writel(val, dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		writel(val, dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, dc_base + UMC_DATASET);
	writel(0x00400020, dc_base + UMC_DCCGCTL);
	writel(0x00000084, dc_base + UMC_FLOWCTLG);
	writel(0x00000000, dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       dc_base + UMC_FLOWCTLA);

	writel(0x00004400, dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, dc_base + UMC_SPCSETB);
	writel(0x00000520, dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, dc_base + UMC_RESPCTL);

	/* flow control / BSI map differ between the two core generations */
	if (ch != 2) {
		writel(0x00202000, dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, dc_base + UMC_FLOWCTLB);
		writel(0x00000000, dc_base + UMC_BSICMAPSET);
	}

	/* unmask all error reporting */
	writel(0x00000000, dc_base + UMC_ERRMASKA);
	writel(0x00000000, dc_base + UMC_ERRMASKB);

	return 0;
}
542 
543 static int umc_ch_init(void __iomem *umc_ch_base, enum dram_freq freq,
544 		       unsigned long size, unsigned int width, int ch)
545 {
546 	void __iomem *dc_base = umc_ch_base + 0x00011000;
547 	void __iomem *phy_base = umc_ch_base + 0x00030000;
548 	int ret;
549 
550 	writel(0x00000002, dc_base + UMC_INITSET);
551 	while (readl(dc_base + UMC_INITSTAT) & BIT(2))
552 		cpu_relax();
553 
554 	/* deassert PHY reset signals */
555 	writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
556 	       dc_base + UMC_DIOCTLA);
557 
558 	ddrphy_init(phy_base, freq, width, ch);
559 
560 	ret = ddrphy_impedance_calibration(phy_base);
561 	if (ret)
562 		return ret;
563 
564 	ddrphy_dram_init(phy_base);
565 	if (ret)
566 		return ret;
567 
568 	ret = umc_dc_init(dc_base, freq, size, width, ch);
569 	if (ret)
570 		return ret;
571 
572 	umc_ud_init(umc_ch_base, ch);
573 
574 	ret = ddrphy_training(phy_base);
575 	if (ret)
576 		return ret;
577 
578 	udelay(1);
579 
580 	/* match the system latency between UMC and PHY */
581 	umc_set_system_latency(dc_base,
582 			       ddrphy_get_system_latency(phy_base, width));
583 
584 	udelay(1);
585 
586 	/* stop auto refresh before clearing FIFO in PHY */
587 	umc_refresh_ctrl(dc_base, 0);
588 	ddrphy_fifo_reset(phy_base);
589 	umc_refresh_ctrl(dc_base, 1);
590 
591 	udelay(10);
592 
593 	return 0;
594 }
595 
596 static void um_init(void __iomem *um_base)
597 {
598 	writel(0x000000ff, um_base + UMC_MBUS0);
599 	writel(0x000000ff, um_base + UMC_MBUS1);
600 	writel(0x000000ff, um_base + UMC_MBUS2);
601 	writel(0x000000ff, um_base + UMC_MBUS3);
602 }
603 
604 int uniphier_pxs2_umc_init(const struct uniphier_board_data *bd)
605 {
606 	void __iomem *um_base = (void __iomem *)0x5b600000;
607 	void __iomem *umc_ch_base = (void __iomem *)0x5b800000;
608 	enum dram_freq freq;
609 	int ch, ret;
610 
611 	switch (bd->dram_freq) {
612 	case 1866:
613 		freq = DRAM_FREQ_1866M;
614 		break;
615 	case 2133:
616 		freq = DRAM_FREQ_2133M;
617 		break;
618 	default:
619 		pr_err("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
620 		return -EINVAL;
621 	}
622 
623 	for (ch = 0; ch < DRAM_CH_NR; ch++) {
624 		unsigned long size = bd->dram_ch[ch].size;
625 		unsigned int width = bd->dram_ch[ch].width;
626 
627 		if (size) {
628 			ret = umc_ch_init(umc_ch_base, freq,
629 					  size / (width / 16), width, ch);
630 			if (ret) {
631 				pr_err("failed to initialize UMC ch%d\n", ch);
632 				return ret;
633 			}
634 		}
635 
636 		umc_ch_base += 0x00200000;
637 	}
638 
639 	um_init(um_base);
640 
641 	return 0;
642 }
643