1 /* 2 * Copyright (C) 2015-2017 Socionext Inc. 3 * Author: Masahiro Yamada <yamada.masahiro@socionext.com> 4 * 5 * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by: 6 * Copyright (C) 2015 Socionext Inc. 7 * 8 * SPDX-License-Identifier: GPL-2.0+ 9 */ 10 11 #include <common.h> 12 #include <linux/errno.h> 13 #include <linux/io.h> 14 #include <linux/sizes.h> 15 #include <asm/processor.h> 16 17 #include "../init.h" 18 #include "../soc-info.h" 19 #include "ddrmphy-regs.h" 20 #include "umc-regs.h" 21 22 #define DRAM_CH_NR 3 23 24 enum dram_freq { 25 DRAM_FREQ_1866M, 26 DRAM_FREQ_2133M, 27 DRAM_FREQ_NR, 28 }; 29 30 enum dram_size { 31 DRAM_SZ_256M, 32 DRAM_SZ_512M, 33 DRAM_SZ_NR, 34 }; 35 36 /* PHY */ 37 static u32 ddrphy_pgcr2[DRAM_FREQ_NR] = {0x00FC7E5D, 0x00FC90AB}; 38 static u32 ddrphy_ptr0[DRAM_FREQ_NR] = {0x0EA09205, 0x10C0A6C6}; 39 static u32 ddrphy_ptr1[DRAM_FREQ_NR] = {0x0DAC041B, 0x0FA104B1}; 40 static u32 ddrphy_ptr3[DRAM_FREQ_NR] = {0x15171e45, 0x18182357}; 41 static u32 ddrphy_ptr4[DRAM_FREQ_NR] = {0x0e9ad8e9, 0x10b34157}; 42 static u32 ddrphy_dtpr0[DRAM_FREQ_NR] = {0x35a00d88, 0x39e40e88}; 43 static u32 ddrphy_dtpr1[DRAM_FREQ_NR] = {0x2288cc2c, 0x228a04d0}; 44 static u32 ddrphy_dtpr2[DRAM_FREQ_NR] = {0x50005e00, 0x50006a00}; 45 static u32 ddrphy_dtpr3[DRAM_FREQ_NR] = {0x0010cb49, 0x0010ec89}; 46 static u32 ddrphy_mr0[DRAM_FREQ_NR] = {0x00000115, 0x00000125}; 47 static u32 ddrphy_mr2[DRAM_FREQ_NR] = {0x000002a0, 0x000002a8}; 48 49 /* dependent on package and board design */ 50 static u32 ddrphy_acbdlr0[DRAM_CH_NR] = {0x0000000c, 0x0000000c, 0x00000009}; 51 52 /* DDR multiPHY */ 53 static inline int ddrphy_get_rank(int dx) 54 { 55 return dx / 2; 56 } 57 58 static void ddrphy_fifo_reset(void __iomem *phy_base) 59 { 60 u32 tmp; 61 62 tmp = readl(phy_base + MPHY_PGCR0); 63 tmp &= ~MPHY_PGCR0_PHYFRST; 64 writel(tmp, phy_base + MPHY_PGCR0); 65 66 udelay(1); 67 68 tmp |= MPHY_PGCR0_PHYFRST; 69 writel(tmp, phy_base + MPHY_PGCR0); 70 71 udelay(1); 72 } 73 74 static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable) 75 { 76 u32 tmp; 77 78 tmp = readl(phy_base + MPHY_PGCR1); 79 80 if (enable) 81 tmp &= ~MPHY_PGCR1_INHVT; 82 else 83 tmp |= MPHY_PGCR1_INHVT; 84 85 writel(tmp, phy_base + MPHY_PGCR1); 86 87 if (!enable) { 88 while (!(readl(phy_base + MPHY_PGSR1) & MPHY_PGSR1_VTSTOP)) 89 cpu_relax(); 90 } 91 } 92 93 static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step) 94 { 95 int dx; 96 u32 lcdlr1, rdqsd; 97 void __iomem *dx_base = phy_base + MPHY_DX_BASE; 98 99 ddrphy_vt_ctrl(phy_base, 0); 100 101 for (dx = 0; dx < nr_dx; dx++) { 102 lcdlr1 = readl(dx_base + MPHY_DX_LCDLR1); 103 rdqsd = (lcdlr1 >> 8) & 0xff; 104 rdqsd = clamp(rdqsd + step, 0U, 0xffU); 105 lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8); 106 writel(lcdlr1, dx_base + MPHY_DX_LCDLR1); 107 readl(dx_base + MPHY_DX_LCDLR1); /* relax */ 108 dx_base += MPHY_DX_STRIDE; 109 } 110 111 ddrphy_vt_ctrl(phy_base, 1); 112 } 113 114 static int ddrphy_get_system_latency(void __iomem *phy_base, int width) 115 { 116 void __iomem *dx_base = phy_base + MPHY_DX_BASE; 117 const int nr_dx = width / 8; 118 int dx, rank; 119 u32 gtr; 120 int dgsl, dgsl_min = INT_MAX, dgsl_max = 0; 121 122 for (dx = 0; dx < nr_dx; dx++) { 123 gtr = readl(dx_base + MPHY_DX_GTR); 124 for (rank = 0; rank < 4; rank++) { 125 dgsl = gtr & 0x7; 126 /* if dgsl is zero, this rank was not trained. skip. */ 127 if (dgsl) { 128 dgsl_min = min(dgsl_min, dgsl); 129 dgsl_max = max(dgsl_max, dgsl); 130 } 131 gtr >>= 3; 132 } 133 dx_base += MPHY_DX_STRIDE; 134 } 135 136 if (dgsl_min != dgsl_max) 137 printf("DQS Gateing System Latencies are not all leveled.\n"); 138 139 return dgsl_max; 140 } 141 142 static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width, 143 int ch) 144 { 145 u32 tmp; 146 void __iomem *zq_base, *dx_base; 147 int zq, dx; 148 int nr_dx; 149 150 nr_dx = width / 8; 151 152 writel(MPHY_PIR_ZCALBYP, phy_base + MPHY_PIR); 153 /* 154 * Disable RGLVT bit (Read DQS Gating LCDL Delay VT Compensation) 155 * to avoid read error issue. 156 */ 157 writel(0x07d81e37, phy_base + MPHY_PGCR0); 158 writel(0x0200c4e0, phy_base + MPHY_PGCR1); 159 160 tmp = ddrphy_pgcr2[freq]; 161 if (width >= 32) 162 tmp |= MPHY_PGCR2_DUALCHN | MPHY_PGCR2_ACPDDC; 163 writel(tmp, phy_base + MPHY_PGCR2); 164 165 writel(ddrphy_ptr0[freq], phy_base + MPHY_PTR0); 166 writel(ddrphy_ptr1[freq], phy_base + MPHY_PTR1); 167 writel(0x00083def, phy_base + MPHY_PTR2); 168 writel(ddrphy_ptr3[freq], phy_base + MPHY_PTR3); 169 writel(ddrphy_ptr4[freq], phy_base + MPHY_PTR4); 170 171 writel(ddrphy_acbdlr0[ch], phy_base + MPHY_ACBDLR0); 172 173 writel(0x55555555, phy_base + MPHY_ACIOCR1); 174 writel(0x00000000, phy_base + MPHY_ACIOCR2); 175 writel(0x55555555, phy_base + MPHY_ACIOCR3); 176 writel(0x00000000, phy_base + MPHY_ACIOCR4); 177 writel(0x00000055, phy_base + MPHY_ACIOCR5); 178 writel(0x00181aa4, phy_base + MPHY_DXCCR); 179 180 writel(0x0024641e, phy_base + MPHY_DSGCR); 181 writel(0x0000040b, phy_base + MPHY_DCR); 182 writel(ddrphy_dtpr0[freq], phy_base + MPHY_DTPR0); 183 writel(ddrphy_dtpr1[freq], phy_base + MPHY_DTPR1); 184 writel(ddrphy_dtpr2[freq], phy_base + MPHY_DTPR2); 185 writel(ddrphy_dtpr3[freq], phy_base + MPHY_DTPR3); 186 writel(ddrphy_mr0[freq], phy_base + MPHY_MR0); 187 writel(0x00000006, phy_base + MPHY_MR1); 188 writel(ddrphy_mr2[freq], phy_base + MPHY_MR2); 189 writel(0x00000000, phy_base + MPHY_MR3); 190 191 tmp = 0; 192 for (dx = 0; dx < nr_dx; dx++) 193 tmp |= BIT(MPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx)); 194 writel(0x90003087 | tmp, phy_base + MPHY_DTCR); 195 196 writel(0x00000000, phy_base + MPHY_DTAR0); 197 writel(0x00000008, phy_base + MPHY_DTAR1); 198 writel(0x00000010, phy_base + MPHY_DTAR2); 199 writel(0x00000018, phy_base + MPHY_DTAR3); 200 writel(0xdd22ee11, phy_base + MPHY_DTDR0); 201 writel(0x7788bb44, phy_base + MPHY_DTDR1); 202 203 /* impedance control settings */ 204 writel(0x04048900, phy_base + MPHY_ZQCR); 205 206 zq_base = phy_base + MPHY_ZQ_BASE; 207 for (zq = 0; zq < 4; zq++) { 208 /* 209 * board-dependent 210 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D 211 */ 212 writel(0x0007BB5D, zq_base + MPHY_ZQ_PR); 213 zq_base += MPHY_ZQ_STRIDE; 214 } 215 216 /* DATX8 settings */ 217 dx_base = phy_base + MPHY_DX_BASE; 218 for (dx = 0; dx < 4; dx++) { 219 tmp = readl(dx_base + MPHY_DX_GCR0); 220 tmp &= ~MPHY_DX_GCR0_WLRKEN_MASK; 221 tmp |= BIT(MPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) & 222 MPHY_DX_GCR0_WLRKEN_MASK; 223 writel(tmp, dx_base + MPHY_DX_GCR0); 224 225 writel(0x00000000, dx_base + MPHY_DX_GCR1); 226 writel(0x00000000, dx_base + MPHY_DX_GCR2); 227 writel(0x00000000, dx_base + MPHY_DX_GCR3); 228 dx_base += MPHY_DX_STRIDE; 229 } 230 231 while (!(readl(phy_base + MPHY_PGSR0) & MPHY_PGSR0_IDONE)) 232 cpu_relax(); 233 234 ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4); 235 } 236 237 struct ddrphy_init_sequence { 238 char *description; 239 u32 init_flag; 240 u32 done_flag; 241 u32 err_flag; 242 }; 243 244 static const struct ddrphy_init_sequence impedance_calibration_sequence[] = { 245 { 246 "Impedance Calibration", 247 MPHY_PIR_ZCAL, 248 MPHY_PGSR0_ZCDONE, 249 MPHY_PGSR0_ZCERR, 250 }, 251 { /* sentinel */ } 252 }; 253 254 static const struct ddrphy_init_sequence dram_init_sequence[] = { 255 { 256 "DRAM Initialization", 257 MPHY_PIR_DRAMRST | MPHY_PIR_DRAMINIT, 258 MPHY_PGSR0_DIDONE, 259 0, 260 }, 261 { /* sentinel */ } 262 }; 263 264 static const struct ddrphy_init_sequence training_sequence[] = { 265 { 266 "Write Leveling", 267 MPHY_PIR_WL, 268 MPHY_PGSR0_WLDONE, 269 MPHY_PGSR0_WLERR, 270 }, 271 { 272 "Read DQS Gate Training", 273 MPHY_PIR_QSGATE, 274 MPHY_PGSR0_QSGDONE, 275 MPHY_PGSR0_QSGERR, 276 }, 277 { 278 "Write Leveling Adjustment", 279 MPHY_PIR_WLADJ, 280 MPHY_PGSR0_WLADONE, 281 MPHY_PGSR0_WLAERR, 282 }, 283 { 284 "Read Bit Deskew", 285 MPHY_PIR_RDDSKW, 286 MPHY_PGSR0_RDDONE, 287 MPHY_PGSR0_RDERR, 288 }, 289 { 290 "Write Bit Deskew", 291 MPHY_PIR_WRDSKW, 292 MPHY_PGSR0_WDDONE, 293 MPHY_PGSR0_WDERR, 294 }, 295 { 296 "Read Eye Training", 297 MPHY_PIR_RDEYE, 298 MPHY_PGSR0_REDONE, 299 MPHY_PGSR0_REERR, 300 }, 301 { 302 "Write Eye Training", 303 MPHY_PIR_WREYE, 304 MPHY_PGSR0_WEDONE, 305 MPHY_PGSR0_WEERR, 306 }, 307 { /* sentinel */ } 308 }; 309 310 static int __ddrphy_training(void __iomem *phy_base, 311 const struct ddrphy_init_sequence *seq) 312 { 313 const struct ddrphy_init_sequence *s; 314 u32 pgsr0; 315 u32 init_flag = MPHY_PIR_INIT; 316 u32 done_flag = MPHY_PGSR0_IDONE; 317 int timeout = 50000; /* 50 msec is long enough */ 318 #ifdef DISPLAY_ELAPSED_TIME 319 ulong start = get_timer(0); 320 #endif 321 322 for (s = seq; s->description; s++) { 323 init_flag |= s->init_flag; 324 done_flag |= s->done_flag; 325 } 326 327 writel(init_flag, phy_base + MPHY_PIR); 328 329 do { 330 if (--timeout < 0) { 331 pr_err("%s: error: timeout during DDR training\n", 332 __func__); 333 return -ETIMEDOUT; 334 } 335 udelay(1); 336 pgsr0 = readl(phy_base + MPHY_PGSR0); 337 } while ((pgsr0 & done_flag) != done_flag); 338 339 for (s = seq; s->description; s++) { 340 if (pgsr0 & s->err_flag) { 341 pr_err("%s: error: %s failed\n", __func__, 342 s->description); 343 return -EIO; 344 } 345 } 346 347 #ifdef DISPLAY_ELAPSED_TIME 348 printf("%s: info: elapsed time %ld msec\n", get_timer(start)); 349 #endif 350 351 return 0; 352 } 353 354 static int ddrphy_impedance_calibration(void __iomem *phy_base) 355 { 356 int ret; 357 u32 tmp; 358 359 ret = __ddrphy_training(phy_base, impedance_calibration_sequence); 360 if (ret) 361 return ret; 362 363 /* 364 * Because of a hardware bug, IDONE flag is set when the first ZQ block 365 * is calibrated. The flag does not guarantee the completion for all 366 * the ZQ blocks. Wait a little more just in case. 367 */ 368 udelay(1); 369 370 /* reflect ZQ settings and enable average algorithm*/ 371 tmp = readl(phy_base + MPHY_ZQCR); 372 tmp |= MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE; 373 writel(tmp, phy_base + MPHY_ZQCR); 374 tmp &= ~MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE; 375 tmp |= MPHY_ZQCR_AVGEN; 376 writel(tmp, phy_base + MPHY_ZQCR); 377 378 return 0; 379 } 380 381 static int ddrphy_dram_init(void __iomem *phy_base) 382 { 383 return __ddrphy_training(phy_base, dram_init_sequence); 384 } 385 386 static int ddrphy_training(void __iomem *phy_base) 387 { 388 return __ddrphy_training(phy_base, training_sequence); 389 } 390 391 /* UMC */ 392 static u32 umc_cmdctla[DRAM_FREQ_NR] = {0x66DD131D, 0x77EE1722}; 393 /* 394 * The ch2 is a different generation UMC core. 395 * The register spec is different, unfortunately. 396 */ 397 static u32 umc_cmdctlb_ch01[DRAM_FREQ_NR] = {0x13E87C44, 0x18F88C44}; 398 static u32 umc_cmdctlb_ch2[DRAM_FREQ_NR] = {0x19E8DC44, 0x1EF8EC44}; 399 static u32 umc_spcctla[DRAM_FREQ_NR][DRAM_SZ_NR] = { 400 {0x004A071D, 0x0078071D}, 401 {0x0055081E, 0x0089081E}, 402 }; 403 404 static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B}; 405 /* The ch2 is different for some reason only hardware guys know... */ 406 static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022}; 407 static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E}; 408 409 static void umc_set_system_latency(void __iomem *dc_base, int phy_latency) 410 { 411 u32 val; 412 int latency; 413 414 val = readl(dc_base + UMC_RDATACTL_D0); 415 latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT; 416 latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >> 417 UMC_RDATACTL_RAD2LTY_SHIFT; 418 /* 419 * UMC works at the half clock rate of the PHY. 420 * The LSB of latency is ignored 421 */ 422 latency += phy_latency & ~1; 423 424 val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK); 425 if (latency > 0xf) { 426 val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT; 427 val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT; 428 } else { 429 val |= latency << UMC_RDATACTL_RADLTY_SHIFT; 430 } 431 432 writel(val, dc_base + UMC_RDATACTL_D0); 433 writel(val, dc_base + UMC_RDATACTL_D1); 434 435 readl(dc_base + UMC_RDATACTL_D1); /* relax */ 436 } 437 438 /* enable/disable auto refresh */ 439 static void umc_refresh_ctrl(void __iomem *dc_base, int enable) 440 { 441 u32 tmp; 442 443 tmp = readl(dc_base + UMC_SPCSETB); 444 tmp &= ~UMC_SPCSETB_AREFMD_MASK; 445 446 if (enable) 447 tmp |= UMC_SPCSETB_AREFMD_ARB; 448 else 449 tmp |= UMC_SPCSETB_AREFMD_REG; 450 451 writel(tmp, dc_base + UMC_SPCSETB); 452 udelay(1); 453 } 454 455 static void umc_ud_init(void __iomem *umc_base, int ch) 456 { 457 writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0); 458 459 if (ch == 2) 460 writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0); 461 } 462 463 static int umc_dc_init(void __iomem *dc_base, enum dram_freq freq, 464 unsigned long size, int width, int ch) 465 { 466 enum dram_size size_e; 467 int latency; 468 u32 val; 469 470 switch (size) { 471 case 0: 472 return 0; 473 case SZ_256M: 474 size_e = DRAM_SZ_256M; 475 break; 476 case SZ_512M: 477 size_e = DRAM_SZ_512M; 478 break; 479 default: 480 pr_err("unsupported DRAM size 0x%08lx (per 16bit) for ch%d\n", 481 size, ch); 482 return -EINVAL; 483 } 484 485 writel(umc_cmdctla[freq], dc_base + UMC_CMDCTLA); 486 487 writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq], 488 dc_base + UMC_CMDCTLB); 489 490 writel(umc_spcctla[freq][size_e], dc_base + UMC_SPCCTLA); 491 writel(umc_spcctlb[freq], dc_base + UMC_SPCCTLB); 492 493 val = 0x000e000e; 494 latency = 12; 495 /* ES2 inserted one more FF to the logic. */ 496 if (uniphier_get_soc_model() >= 2) 497 latency += 2; 498 499 if (latency > 0xf) { 500 val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT; 501 val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT; 502 } else { 503 val |= latency << UMC_RDATACTL_RADLTY_SHIFT; 504 } 505 506 writel(val, dc_base + UMC_RDATACTL_D0); 507 if (width >= 32) 508 writel(val, dc_base + UMC_RDATACTL_D1); 509 510 writel(0x04060A02, dc_base + UMC_WDATACTL_D0); 511 if (width >= 32) 512 writel(0x04060A02, dc_base + UMC_WDATACTL_D1); 513 writel(0x04000000, dc_base + UMC_DATASET); 514 writel(0x00400020, dc_base + UMC_DCCGCTL); 515 writel(0x00000084, dc_base + UMC_FLOWCTLG); 516 writel(0x00000000, dc_base + UMC_ACSSETA); 517 518 writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq], 519 dc_base + UMC_FLOWCTLA); 520 521 writel(0x00004400, dc_base + UMC_FLOWCTLC); 522 writel(0x200A0A00, dc_base + UMC_SPCSETB); 523 writel(0x00000520, dc_base + UMC_DFICUPDCTLA); 524 writel(0x0000000D, dc_base + UMC_RESPCTL); 525 526 if (ch != 2) { 527 writel(0x00202000, dc_base + UMC_FLOWCTLB); 528 writel(0xFDBFFFFF, dc_base + UMC_FLOWCTLOB0); 529 writel(0xFFFFFFFF, dc_base + UMC_FLOWCTLOB1); 530 writel(0x00080700, dc_base + UMC_BSICMAPSET); 531 } else { 532 writel(0x00200000, dc_base + UMC_FLOWCTLB); 533 writel(0x00000000, dc_base + UMC_BSICMAPSET); 534 } 535 536 writel(0x00000000, dc_base + UMC_ERRMASKA); 537 writel(0x00000000, dc_base + UMC_ERRMASKB); 538 539 return 0; 540 } 541 542 static int umc_ch_init(void __iomem *umc_ch_base, enum dram_freq freq, 543 unsigned long size, unsigned int width, int ch) 544 { 545 void __iomem *dc_base = umc_ch_base + 0x00011000; 546 void __iomem *phy_base = umc_ch_base + 0x00030000; 547 int ret; 548 549 writel(0x00000002, dc_base + UMC_INITSET); 550 while (readl(dc_base + UMC_INITSTAT) & BIT(2)) 551 cpu_relax(); 552 553 /* deassert PHY reset signals */ 554 writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST, 555 dc_base + UMC_DIOCTLA); 556 557 ddrphy_init(phy_base, freq, width, ch); 558 559 ret = ddrphy_impedance_calibration(phy_base); 560 if (ret) 561 return ret; 562 563 ddrphy_dram_init(phy_base); 564 if (ret) 565 return ret; 566 567 ret = umc_dc_init(dc_base, freq, size, width, ch); 568 if (ret) 569 return ret; 570 571 umc_ud_init(umc_ch_base, ch); 572 573 ret = ddrphy_training(phy_base); 574 if (ret) 575 return ret; 576 577 udelay(1); 578 579 /* match the system latency between UMC and PHY */ 580 umc_set_system_latency(dc_base, 581 ddrphy_get_system_latency(phy_base, width)); 582 583 udelay(1); 584 585 /* stop auto refresh before clearing FIFO in PHY */ 586 umc_refresh_ctrl(dc_base, 0); 587 ddrphy_fifo_reset(phy_base); 588 umc_refresh_ctrl(dc_base, 1); 589 590 udelay(10); 591 592 return 0; 593 } 594 595 static void um_init(void __iomem *um_base) 596 { 597 writel(0x000000ff, um_base + UMC_MBUS0); 598 writel(0x000000ff, um_base + UMC_MBUS1); 599 writel(0x000000ff, um_base + UMC_MBUS2); 600 writel(0x000000ff, um_base + UMC_MBUS3); 601 } 602 603 int uniphier_pxs2_umc_init(const struct uniphier_board_data *bd) 604 { 605 void __iomem *um_base = (void __iomem *)0x5b600000; 606 void __iomem *umc_ch_base = (void __iomem *)0x5b800000; 607 enum dram_freq freq; 608 int ch, ret; 609 610 switch (bd->dram_freq) { 611 case 1866: 612 freq = DRAM_FREQ_1866M; 613 break; 614 case 2133: 615 freq = DRAM_FREQ_2133M; 616 break; 617 default: 618 pr_err("unsupported DRAM frequency %d MHz\n", bd->dram_freq); 619 return -EINVAL; 620 } 621 622 for (ch = 0; ch < DRAM_CH_NR; ch++) { 623 unsigned long size = bd->dram_ch[ch].size; 624 unsigned int width = bd->dram_ch[ch].width; 625 626 if (size) { 627 ret = umc_ch_init(umc_ch_base, freq, 628 size / (width / 16), width, ch); 629 if (ret) { 630 pr_err("failed to initialize UMC ch%d\n", ch); 631 return ret; 632 } 633 } 634 635 umc_ch_base += 0x00200000; 636 } 637 638 um_init(um_base); 639 640 return 0; 641 } 642