1 /* 2 * Copyright (C) 2014 Gateworks Corporation 3 * Author: Tim Harvey <tharvey@gateworks.com> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 */ 7 8 #include <common.h> 9 #include <linux/types.h> 10 #include <asm/arch/clock.h> 11 #include <asm/arch/mx6-ddr.h> 12 #include <asm/arch/sys_proto.h> 13 #include <asm/io.h> 14 #include <asm/types.h> 15 #include <wait_bit.h> 16 17 #if defined(CONFIG_MX6_DDRCAL) 18 static void reset_read_data_fifos(void) 19 { 20 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 21 22 /* Reset data FIFOs twice. */ 23 setbits_le32(&mmdc0->mpdgctrl0, 1 << 31); 24 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0); 25 26 setbits_le32(&mmdc0->mpdgctrl0, 1 << 31); 27 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0); 28 } 29 30 static void precharge_all(const bool cs0_enable, const bool cs1_enable) 31 { 32 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 33 34 /* 35 * Issue the Precharge-All command to the DDR device for both 36 * chip selects. Note, CON_REQ bit should also remain set. If 37 * only using one chip select, then precharge only the desired 38 * chip select. 39 */ 40 if (cs0_enable) { /* CS0 */ 41 writel(0x04008050, &mmdc0->mdscr); 42 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0); 43 } 44 45 if (cs1_enable) { /* CS1 */ 46 writel(0x04008058, &mmdc0->mdscr); 47 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0); 48 } 49 } 50 51 static void force_delay_measurement(int bus_size) 52 { 53 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 54 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 55 56 writel(0x800, &mmdc0->mpmur0); 57 if (bus_size == 0x2) 58 writel(0x800, &mmdc1->mpmur0); 59 } 60 61 static void modify_dg_result(u32 *reg_st0, u32 *reg_st1, u32 *reg_ctrl) 62 { 63 u32 dg_tmp_val, dg_dl_abs_offset, dg_hc_del, val_ctrl; 64 65 /* 66 * DQS gating absolute offset should be modified from reflecting 67 * (HW_DG_LOWx + HW_DG_UPx)/2 to reflecting (HW_DG_UPx - 0x80) 68 */ 69 70 val_ctrl = readl(reg_ctrl); 71 val_ctrl &= 0xf0000000; 72 73 dg_tmp_val = ((readl(reg_st0) & 0x07ff0000) >> 16) - 0xc0; 74 dg_dl_abs_offset = dg_tmp_val & 0x7f; 75 dg_hc_del = (dg_tmp_val & 0x780) << 1; 76 77 val_ctrl |= dg_dl_abs_offset + dg_hc_del; 78 79 dg_tmp_val = ((readl(reg_st1) & 0x07ff0000) >> 16) - 0xc0; 80 dg_dl_abs_offset = dg_tmp_val & 0x7f; 81 dg_hc_del = (dg_tmp_val & 0x780) << 1; 82 83 val_ctrl |= (dg_dl_abs_offset + dg_hc_del) << 16; 84 85 writel(val_ctrl, reg_ctrl); 86 } 87 88 static void correct_mpwldectr_result(void *reg) 89 { 90 /* Limit is 200/256 of CK, which is WL_HC_DELx | 0x48. */ 91 const unsigned int limit = 0x148; 92 u32 val = readl(reg); 93 u32 old = val; 94 95 if ((val & 0x17f) > limit) 96 val &= 0xffff << 16; 97 98 if (((val >> 16) & 0x17f) > limit) 99 val &= 0xffff; 100 101 if (old != val) 102 writel(val, reg); 103 } 104 105 int mmdc_do_write_level_calibration(struct mx6_ddr_sysinfo const *sysinfo) 106 { 107 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 108 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 109 u32 esdmisc_val, zq_val; 110 u32 errors = 0; 111 u32 ldectrl[4] = {0}; 112 u32 ddr_mr1 = 0x4; 113 u32 rwalat_max; 114 115 /* 116 * Stash old values in case calibration fails, 117 * we need to restore them 118 */ 119 ldectrl[0] = readl(&mmdc0->mpwldectrl0); 120 ldectrl[1] = readl(&mmdc0->mpwldectrl1); 121 if (sysinfo->dsize == 2) { 122 ldectrl[2] = readl(&mmdc1->mpwldectrl0); 123 ldectrl[3] = readl(&mmdc1->mpwldectrl1); 124 } 125 126 /* disable DDR logic power down timer */ 127 clrbits_le32(&mmdc0->mdpdc, 0xff00); 128 129 /* disable Adopt power down timer */ 130 setbits_le32(&mmdc0->mapsr, 0x1); 131 132 debug("Starting write leveling calibration.\n"); 133 134 /* 135 * 2. disable auto refresh and ZQ calibration 136 * before proceeding with Write Leveling calibration 137 */ 138 esdmisc_val = readl(&mmdc0->mdref); 139 writel(0x0000C000, &mmdc0->mdref); 140 zq_val = readl(&mmdc0->mpzqhwctrl); 141 writel(zq_val & ~0x3, &mmdc0->mpzqhwctrl); 142 143 /* 3. increase walat and ralat to maximum */ 144 rwalat_max = (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17); 145 setbits_le32(&mmdc0->mdmisc, rwalat_max); 146 if (sysinfo->dsize == 2) 147 setbits_le32(&mmdc1->mdmisc, rwalat_max); 148 /* 149 * 4 & 5. Configure the external DDR device to enter write-leveling 150 * mode through Load Mode Register command. 151 * Register setting: 152 * Bits[31:16] MR1 value (0x0080 write leveling enable) 153 * Bit[9] set WL_EN to enable MMDC DQS output 154 * Bits[6:4] set CMD bits for Load Mode Register programming 155 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming 156 */ 157 writel(0x00808231, &mmdc0->mdscr); 158 159 /* 6. Activate automatic calibration by setting MPWLGCR[HW_WL_EN] */ 160 writel(0x00000001, &mmdc0->mpwlgcr); 161 162 /* 163 * 7. Upon completion of this process the MMDC de-asserts 164 * the MPWLGCR[HW_WL_EN] 165 */ 166 wait_for_bit_le32(&mmdc0->mpwlgcr, 1 << 0, 0, 100, 0); 167 168 /* 169 * 8. check for any errors: check both PHYs for x64 configuration, 170 * if x32, check only PHY0 171 */ 172 if (readl(&mmdc0->mpwlgcr) & 0x00000F00) 173 errors |= 1; 174 if (sysinfo->dsize == 2) 175 if (readl(&mmdc1->mpwlgcr) & 0x00000F00) 176 errors |= 2; 177 178 debug("Ending write leveling calibration. Error mask: 0x%x\n", errors); 179 180 /* check to see if cal failed */ 181 if ((readl(&mmdc0->mpwldectrl0) == 0x001F001F) && 182 (readl(&mmdc0->mpwldectrl1) == 0x001F001F) && 183 ((sysinfo->dsize < 2) || 184 ((readl(&mmdc1->mpwldectrl0) == 0x001F001F) && 185 (readl(&mmdc1->mpwldectrl1) == 0x001F001F)))) { 186 debug("Cal seems to have soft-failed due to memory not supporting write leveling on all channels. Restoring original write leveling values.\n"); 187 writel(ldectrl[0], &mmdc0->mpwldectrl0); 188 writel(ldectrl[1], &mmdc0->mpwldectrl1); 189 if (sysinfo->dsize == 2) { 190 writel(ldectrl[2], &mmdc1->mpwldectrl0); 191 writel(ldectrl[3], &mmdc1->mpwldectrl1); 192 } 193 errors |= 4; 194 } 195 196 correct_mpwldectr_result(&mmdc0->mpwldectrl0); 197 correct_mpwldectr_result(&mmdc0->mpwldectrl1); 198 if (sysinfo->dsize == 2) { 199 correct_mpwldectr_result(&mmdc1->mpwldectrl0); 200 correct_mpwldectr_result(&mmdc1->mpwldectrl1); 201 } 202 203 /* 204 * User should issue MRS command to exit write leveling mode 205 * through Load Mode Register command 206 * Register setting: 207 * Bits[31:16] MR1 value "ddr_mr1" value from initialization 208 * Bit[9] clear WL_EN to disable MMDC DQS output 209 * Bits[6:4] set CMD bits for Load Mode Register programming 210 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming 211 */ 212 writel((ddr_mr1 << 16) + 0x8031, &mmdc0->mdscr); 213 214 /* re-enable auto refresh and zq cal */ 215 writel(esdmisc_val, &mmdc0->mdref); 216 writel(zq_val, &mmdc0->mpzqhwctrl); 217 218 debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n", 219 readl(&mmdc0->mpwldectrl0)); 220 debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n", 221 readl(&mmdc0->mpwldectrl1)); 222 if (sysinfo->dsize == 2) { 223 debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n", 224 readl(&mmdc1->mpwldectrl0)); 225 debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n", 226 readl(&mmdc1->mpwldectrl1)); 227 } 228 229 /* We must force a readback of these values, to get them to stick */ 230 readl(&mmdc0->mpwldectrl0); 231 readl(&mmdc0->mpwldectrl1); 232 if (sysinfo->dsize == 2) { 233 readl(&mmdc1->mpwldectrl0); 234 readl(&mmdc1->mpwldectrl1); 235 } 236 237 /* enable DDR logic power down timer: */ 238 setbits_le32(&mmdc0->mdpdc, 0x00005500); 239 240 /* Enable Adopt power down timer: */ 241 clrbits_le32(&mmdc0->mapsr, 0x1); 242 243 /* Clear CON_REQ */ 244 writel(0, &mmdc0->mdscr); 245 246 return errors; 247 } 248 249 int mmdc_do_dqs_calibration(struct mx6_ddr_sysinfo const *sysinfo) 250 { 251 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 252 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 253 struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux = 254 (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE; 255 bool cs0_enable; 256 bool cs1_enable; 257 bool cs0_enable_initial; 258 bool cs1_enable_initial; 259 u32 esdmisc_val; 260 u32 temp_ref; 261 u32 pddword = 0x00ffff00; /* best so far, place into MPPDCMPR1 */ 262 u32 errors = 0; 263 u32 initdelay = 0x40404040; 264 265 /* check to see which chip selects are enabled */ 266 cs0_enable_initial = readl(&mmdc0->mdctl) & 0x80000000; 267 cs1_enable_initial = readl(&mmdc0->mdctl) & 0x40000000; 268 269 /* disable DDR logic power down timer: */ 270 clrbits_le32(&mmdc0->mdpdc, 0xff00); 271 272 /* disable Adopt power down timer: */ 273 setbits_le32(&mmdc0->mapsr, 0x1); 274 275 /* set DQS pull ups */ 276 setbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000); 277 setbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000); 278 setbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000); 279 setbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000); 280 setbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000); 281 setbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000); 282 setbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000); 283 setbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000); 284 285 /* Save old RALAT and WALAT values */ 286 esdmisc_val = readl(&mmdc0->mdmisc); 287 288 setbits_le32(&mmdc0->mdmisc, 289 (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17)); 290 291 /* Disable auto refresh before proceeding with calibration */ 292 temp_ref = readl(&mmdc0->mdref); 293 writel(0x0000c000, &mmdc0->mdref); 294 295 /* 296 * Per the ref manual, issue one refresh cycle MDSCR[CMD]= 0x2, 297 * this also sets the CON_REQ bit. 298 */ 299 if (cs0_enable_initial) 300 writel(0x00008020, &mmdc0->mdscr); 301 if (cs1_enable_initial) 302 writel(0x00008028, &mmdc0->mdscr); 303 304 /* poll to make sure the con_ack bit was asserted */ 305 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0); 306 307 /* 308 * Check MDMISC register CALIB_PER_CS to see which CS calibration 309 * is targeted to (under normal cases, it should be cleared 310 * as this is the default value, indicating calibration is directed 311 * to CS0). 312 * Disable the other chip select not being target for calibration 313 * to avoid any potential issues. This will get re-enabled at end 314 * of calibration. 315 */ 316 if ((readl(&mmdc0->mdmisc) & 0x00100000) == 0) 317 clrbits_le32(&mmdc0->mdctl, 1 << 30); /* clear SDE_1 */ 318 else 319 clrbits_le32(&mmdc0->mdctl, 1 << 31); /* clear SDE_0 */ 320 321 /* 322 * Check to see which chip selects are now enabled for 323 * the remainder of the calibration. 324 */ 325 cs0_enable = readl(&mmdc0->mdctl) & 0x80000000; 326 cs1_enable = readl(&mmdc0->mdctl) & 0x40000000; 327 328 precharge_all(cs0_enable, cs1_enable); 329 330 /* Write the pre-defined value into MPPDCMPR1 */ 331 writel(pddword, &mmdc0->mppdcmpr1); 332 333 /* 334 * Issue a write access to the external DDR device by setting 335 * the bit SW_DUMMY_WR (bit 0) in the MPSWDAR0 and then poll 336 * this bit until it clears to indicate completion of the write access. 337 */ 338 setbits_le32(&mmdc0->mpswdar0, 1); 339 wait_for_bit_le32(&mmdc0->mpswdar0, 1 << 0, 0, 100, 0); 340 341 /* Set the RD_DL_ABS# bits to their default values 342 * (will be calibrated later in the read delay-line calibration). 343 * Both PHYs for x64 configuration, if x32, do only PHY0. 344 */ 345 writel(initdelay, &mmdc0->mprddlctl); 346 if (sysinfo->dsize == 0x2) 347 writel(initdelay, &mmdc1->mprddlctl); 348 349 /* Force a measurment, for previous delay setup to take effect. */ 350 force_delay_measurement(sysinfo->dsize); 351 352 /* 353 * *************************** 354 * Read DQS Gating calibration 355 * *************************** 356 */ 357 debug("Starting Read DQS Gating calibration.\n"); 358 359 /* 360 * Reset the read data FIFOs (two resets); only need to issue reset 361 * to PHY0 since in x64 mode, the reset will also go to PHY1. 362 */ 363 reset_read_data_fifos(); 364 365 /* 366 * Start the automatic read DQS gating calibration process by 367 * asserting MPDGCTRL0[HW_DG_EN] and MPDGCTRL0[DG_CMP_CYC] 368 * and then poll MPDGCTRL0[HW_DG_EN]] until this bit clears 369 * to indicate completion. 370 * Also, ensure that MPDGCTRL0[HW_DG_ERR] is clear to indicate 371 * no errors were seen during calibration. 372 */ 373 374 /* 375 * Set bit 30: chooses option to wait 32 cycles instead of 376 * 16 before comparing read data. 377 */ 378 setbits_le32(&mmdc0->mpdgctrl0, 1 << 30); 379 if (sysinfo->dsize == 2) 380 setbits_le32(&mmdc1->mpdgctrl0, 1 << 30); 381 382 /* Set bit 28 to start automatic read DQS gating calibration */ 383 setbits_le32(&mmdc0->mpdgctrl0, 5 << 28); 384 385 /* Poll for completion. MPDGCTRL0[HW_DG_EN] should be 0 */ 386 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 28, 0, 100, 0); 387 388 /* 389 * Check to see if any errors were encountered during calibration 390 * (check MPDGCTRL0[HW_DG_ERR]). 391 * Check both PHYs for x64 configuration, if x32, check only PHY0. 392 */ 393 if (readl(&mmdc0->mpdgctrl0) & 0x00001000) 394 errors |= 1; 395 396 if ((sysinfo->dsize == 0x2) && (readl(&mmdc1->mpdgctrl0) & 0x00001000)) 397 errors |= 2; 398 399 /* now disable mpdgctrl0[DG_CMP_CYC] */ 400 clrbits_le32(&mmdc0->mpdgctrl0, 1 << 30); 401 if (sysinfo->dsize == 2) 402 clrbits_le32(&mmdc1->mpdgctrl0, 1 << 30); 403 404 /* 405 * DQS gating absolute offset should be modified from 406 * reflecting (HW_DG_LOWx + HW_DG_UPx)/2 to 407 * reflecting (HW_DG_UPx - 0x80) 408 */ 409 modify_dg_result(&mmdc0->mpdghwst0, &mmdc0->mpdghwst1, 410 &mmdc0->mpdgctrl0); 411 modify_dg_result(&mmdc0->mpdghwst2, &mmdc0->mpdghwst3, 412 &mmdc0->mpdgctrl1); 413 if (sysinfo->dsize == 0x2) { 414 modify_dg_result(&mmdc1->mpdghwst0, &mmdc1->mpdghwst1, 415 &mmdc1->mpdgctrl0); 416 modify_dg_result(&mmdc1->mpdghwst2, &mmdc1->mpdghwst3, 417 &mmdc1->mpdgctrl1); 418 } 419 debug("Ending Read DQS Gating calibration. Error mask: 0x%x\n", errors); 420 421 /* 422 * ********************** 423 * Read Delay calibration 424 * ********************** 425 */ 426 debug("Starting Read Delay calibration.\n"); 427 428 reset_read_data_fifos(); 429 430 /* 431 * 4. Issue the Precharge-All command to the DDR device for both 432 * chip selects. If only using one chip select, then precharge 433 * only the desired chip select. 434 */ 435 precharge_all(cs0_enable, cs1_enable); 436 437 /* 438 * 9. Read delay-line calibration 439 * Start the automatic read calibration process by asserting 440 * MPRDDLHWCTL[HW_RD_DL_EN]. 441 */ 442 writel(0x00000030, &mmdc0->mprddlhwctl); 443 444 /* 445 * 10. poll for completion 446 * MMDC indicates that the write data calibration had finished by 447 * setting MPRDDLHWCTL[HW_RD_DL_EN] = 0. Also, ensure that 448 * no error bits were set. 449 */ 450 wait_for_bit_le32(&mmdc0->mprddlhwctl, 1 << 4, 0, 100, 0); 451 452 /* check both PHYs for x64 configuration, if x32, check only PHY0 */ 453 if (readl(&mmdc0->mprddlhwctl) & 0x0000000f) 454 errors |= 4; 455 456 if ((sysinfo->dsize == 0x2) && 457 (readl(&mmdc1->mprddlhwctl) & 0x0000000f)) 458 errors |= 8; 459 460 debug("Ending Read Delay calibration. Error mask: 0x%x\n", errors); 461 462 /* 463 * *********************** 464 * Write Delay Calibration 465 * *********************** 466 */ 467 debug("Starting Write Delay calibration.\n"); 468 469 reset_read_data_fifos(); 470 471 /* 472 * 4. Issue the Precharge-All command to the DDR device for both 473 * chip selects. If only using one chip select, then precharge 474 * only the desired chip select. 475 */ 476 precharge_all(cs0_enable, cs1_enable); 477 478 /* 479 * 8. Set the WR_DL_ABS# bits to their default values. 480 * Both PHYs for x64 configuration, if x32, do only PHY0. 481 */ 482 writel(initdelay, &mmdc0->mpwrdlctl); 483 if (sysinfo->dsize == 0x2) 484 writel(initdelay, &mmdc1->mpwrdlctl); 485 486 /* 487 * XXX This isn't in the manual. Force a measurement, 488 * for previous delay setup to effect. 489 */ 490 force_delay_measurement(sysinfo->dsize); 491 492 /* 493 * 9. 10. Start the automatic write calibration process 494 * by asserting MPWRDLHWCTL0[HW_WR_DL_EN]. 495 */ 496 writel(0x00000030, &mmdc0->mpwrdlhwctl); 497 498 /* 499 * Poll for completion. 500 * MMDC indicates that the write data calibration had finished 501 * by setting MPWRDLHWCTL[HW_WR_DL_EN] = 0. 502 * Also, ensure that no error bits were set. 503 */ 504 wait_for_bit_le32(&mmdc0->mpwrdlhwctl, 1 << 4, 0, 100, 0); 505 506 /* Check both PHYs for x64 configuration, if x32, check only PHY0 */ 507 if (readl(&mmdc0->mpwrdlhwctl) & 0x0000000f) 508 errors |= 16; 509 510 if ((sysinfo->dsize == 0x2) && 511 (readl(&mmdc1->mpwrdlhwctl) & 0x0000000f)) 512 errors |= 32; 513 514 debug("Ending Write Delay calibration. Error mask: 0x%x\n", errors); 515 516 reset_read_data_fifos(); 517 518 /* Enable DDR logic power down timer */ 519 setbits_le32(&mmdc0->mdpdc, 0x00005500); 520 521 /* Enable Adopt power down timer */ 522 clrbits_le32(&mmdc0->mapsr, 0x1); 523 524 /* Restore MDMISC value (RALAT, WALAT) to MMDCP1 */ 525 writel(esdmisc_val, &mmdc0->mdmisc); 526 527 /* Clear DQS pull ups */ 528 clrbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000); 529 clrbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000); 530 clrbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000); 531 clrbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000); 532 clrbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000); 533 clrbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000); 534 clrbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000); 535 clrbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000); 536 537 /* Re-enable SDE (chip selects) if they were set initially */ 538 if (cs1_enable_initial) 539 /* Set SDE_1 */ 540 setbits_le32(&mmdc0->mdctl, 1 << 30); 541 542 if (cs0_enable_initial) 543 /* Set SDE_0 */ 544 setbits_le32(&mmdc0->mdctl, 1 << 31); 545 546 /* Re-enable to auto refresh */ 547 writel(temp_ref, &mmdc0->mdref); 548 549 /* Clear the MDSCR (including the con_req bit) */ 550 writel(0x0, &mmdc0->mdscr); /* CS0 */ 551 552 /* Poll to make sure the con_ack bit is clear */ 553 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 0, 100, 0); 554 555 /* 556 * Print out the registers that were updated as a result 557 * of the calibration process. 558 */ 559 debug("MMDC registers updated from calibration\n"); 560 debug("Read DQS gating calibration:\n"); 561 debug("\tMPDGCTRL0 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl0)); 562 debug("\tMPDGCTRL1 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl1)); 563 if (sysinfo->dsize == 2) { 564 debug("\tMPDGCTRL0 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl0)); 565 debug("\tMPDGCTRL1 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl1)); 566 } 567 debug("Read calibration:\n"); 568 debug("\tMPRDDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mprddlctl)); 569 if (sysinfo->dsize == 2) 570 debug("\tMPRDDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mprddlctl)); 571 debug("Write calibration:\n"); 572 debug("\tMPWRDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mpwrdlctl)); 573 if (sysinfo->dsize == 2) 574 debug("\tMPWRDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mpwrdlctl)); 575 576 /* 577 * Registers below are for debugging purposes. These print out 578 * the upper and lower boundaries captured during 579 * read DQS gating calibration. 580 */ 581 debug("Status registers bounds for read DQS gating:\n"); 582 debug("\tMPDGHWST0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst0)); 583 debug("\tMPDGHWST1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst1)); 584 debug("\tMPDGHWST2 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst2)); 585 debug("\tMPDGHWST3 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst3)); 586 if (sysinfo->dsize == 2) { 587 debug("\tMPDGHWST0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst0)); 588 debug("\tMPDGHWST1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst1)); 589 debug("\tMPDGHWST2 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst2)); 590 debug("\tMPDGHWST3 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst3)); 591 } 592 593 debug("Final do_dqs_calibration error mask: 0x%x\n", errors); 594 595 return errors; 596 } 597 #endif 598 599 #if defined(CONFIG_MX6SX) 600 /* Configure MX6SX mmdc iomux */ 601 void mx6sx_dram_iocfg(unsigned width, 602 const struct mx6sx_iomux_ddr_regs *ddr, 603 const struct mx6sx_iomux_grp_regs *grp) 604 { 605 struct mx6sx_iomux_ddr_regs *mx6_ddr_iomux; 606 struct mx6sx_iomux_grp_regs *mx6_grp_iomux; 607 608 mx6_ddr_iomux = (struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE; 609 mx6_grp_iomux = (struct mx6sx_iomux_grp_regs *)MX6SX_IOM_GRP_BASE; 610 611 /* DDR IO TYPE */ 612 writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type); 613 writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke); 614 615 /* CLOCK */ 616 writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0); 617 618 /* ADDRESS */ 619 writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas); 620 writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras); 621 writel(grp->grp_addds, &mx6_grp_iomux->grp_addds); 622 623 /* Control */ 624 writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset); 625 writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2); 626 writel(ddr->dram_sdcke0, &mx6_ddr_iomux->dram_sdcke0); 627 writel(ddr->dram_sdcke1, &mx6_ddr_iomux->dram_sdcke1); 628 writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0); 629 writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1); 630 writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds); 631 632 /* Data Strobes */ 633 writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl); 634 writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0); 635 writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1); 636 if (width >= 32) { 637 writel(ddr->dram_sdqs2, &mx6_ddr_iomux->dram_sdqs2); 638 writel(ddr->dram_sdqs3, &mx6_ddr_iomux->dram_sdqs3); 639 } 640 641 /* Data */ 642 writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode); 643 writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds); 644 writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds); 645 if (width >= 32) { 646 writel(grp->grp_b2ds, &mx6_grp_iomux->grp_b2ds); 647 writel(grp->grp_b3ds, &mx6_grp_iomux->grp_b3ds); 648 } 649 writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0); 650 writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1); 651 if (width >= 32) { 652 writel(ddr->dram_dqm2, &mx6_ddr_iomux->dram_dqm2); 653 writel(ddr->dram_dqm3, &mx6_ddr_iomux->dram_dqm3); 654 } 655 } 656 #endif 657 658 #if defined(CONFIG_MX6UL) || defined(CONFIG_MX6ULL) 659 void mx6ul_dram_iocfg(unsigned width, 660 const struct mx6ul_iomux_ddr_regs *ddr, 661 const struct mx6ul_iomux_grp_regs *grp) 662 { 663 struct mx6ul_iomux_ddr_regs *mx6_ddr_iomux; 664 struct mx6ul_iomux_grp_regs *mx6_grp_iomux; 665 666 mx6_ddr_iomux = (struct mx6ul_iomux_ddr_regs *)MX6UL_IOM_DDR_BASE; 667 mx6_grp_iomux = (struct mx6ul_iomux_grp_regs *)MX6UL_IOM_GRP_BASE; 668 669 /* DDR IO TYPE */ 670 writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type); 671 writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke); 672 673 /* CLOCK */ 674 writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0); 675 676 /* ADDRESS */ 677 writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas); 678 writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras); 679 writel(grp->grp_addds, &mx6_grp_iomux->grp_addds); 680 681 /* Control */ 682 writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset); 683 writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2); 684 writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0); 685 writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1); 686 writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds); 687 688 /* Data Strobes */ 689 writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl); 690 writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0); 691 writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1); 692 693 /* Data */ 694 writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode); 695 writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds); 696 writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds); 697 writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0); 698 writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1); 699 } 700 #endif 701 702 #if defined(CONFIG_MX6SL) 703 void mx6sl_dram_iocfg(unsigned width, 704 const struct mx6sl_iomux_ddr_regs *ddr, 705 const struct mx6sl_iomux_grp_regs *grp) 706 { 707 struct mx6sl_iomux_ddr_regs *mx6_ddr_iomux; 708 struct mx6sl_iomux_grp_regs *mx6_grp_iomux; 709 710 mx6_ddr_iomux = (struct mx6sl_iomux_ddr_regs *)MX6SL_IOM_DDR_BASE; 711 mx6_grp_iomux = (struct mx6sl_iomux_grp_regs *)MX6SL_IOM_GRP_BASE; 712 713 /* DDR IO TYPE */ 714 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type; 715 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke; 716 717 /* CLOCK */ 718 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0; 719 720 /* ADDRESS */ 721 mx6_ddr_iomux->dram_cas = ddr->dram_cas; 722 mx6_ddr_iomux->dram_ras = ddr->dram_ras; 723 mx6_grp_iomux->grp_addds = grp->grp_addds; 724 725 /* Control */ 726 mx6_ddr_iomux->dram_reset = ddr->dram_reset; 727 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2; 728 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds; 729 730 /* Data Strobes */ 731 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl; 732 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0; 733 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1; 734 if (width >= 32) { 735 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2; 736 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3; 737 } 738 739 /* Data */ 740 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode; 741 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds; 742 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds; 743 if (width >= 32) { 744 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds; 745 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds; 746 } 747 748 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0; 749 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1; 750 if (width >= 32) { 751 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2; 752 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3; 753 } 754 } 755 #endif 756 757 #if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D) 758 /* Configure MX6DQ mmdc iomux */ 759 void mx6dq_dram_iocfg(unsigned width, 760 const struct mx6dq_iomux_ddr_regs *ddr, 761 const struct mx6dq_iomux_grp_regs *grp) 762 { 763 volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux; 764 volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux; 765 766 mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE; 767 mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE; 768 769 /* DDR IO Type */ 770 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type; 771 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke; 772 773 /* Clock */ 774 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0; 775 mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1; 776 777 /* Address */ 778 mx6_ddr_iomux->dram_cas = ddr->dram_cas; 779 mx6_ddr_iomux->dram_ras = ddr->dram_ras; 780 mx6_grp_iomux->grp_addds = grp->grp_addds; 781 782 /* Control */ 783 mx6_ddr_iomux->dram_reset = ddr->dram_reset; 784 mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0; 785 mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1; 786 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2; 787 mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0; 788 mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1; 789 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds; 790 791 /* Data Strobes */ 792 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl; 793 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0; 794 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1; 795 if (width >= 32) { 796 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2; 797 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3; 798 } 799 if (width >= 64) { 800 mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4; 801 mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5; 802 mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6; 803 mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7; 804 } 805 806 /* Data */ 807 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode; 808 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds; 809 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds; 810 if (width >= 32) { 811 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds; 812 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds; 813 } 814 if (width >= 64) { 815 mx6_grp_iomux->grp_b4ds = grp->grp_b4ds; 816 mx6_grp_iomux->grp_b5ds = grp->grp_b5ds; 817 mx6_grp_iomux->grp_b6ds = grp->grp_b6ds; 818 mx6_grp_iomux->grp_b7ds = grp->grp_b7ds; 819 } 820 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0; 821 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1; 822 if (width >= 32) { 823 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2; 824 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3; 825 } 826 if (width >= 64) { 827 mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4; 828 mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5; 829 mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6; 830 mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7; 831 } 832 } 833 #endif 834 835 #if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S) 836 /* Configure MX6SDL mmdc iomux */ 837 void mx6sdl_dram_iocfg(unsigned width, 838 const struct mx6sdl_iomux_ddr_regs *ddr, 839 const struct mx6sdl_iomux_grp_regs *grp) 840 { 841 volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux; 842 volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux; 843 844 mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE; 845 mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE; 846 847 /* DDR IO Type */ 848 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type; 849 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke; 850 851 /* Clock */ 852 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0; 853 mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1; 854 855 /* Address */ 856 mx6_ddr_iomux->dram_cas = ddr->dram_cas; 857 mx6_ddr_iomux->dram_ras = ddr->dram_ras; 858 mx6_grp_iomux->grp_addds = grp->grp_addds; 859 860 /* Control */ 861 mx6_ddr_iomux->dram_reset = ddr->dram_reset; 862 mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0; 863 mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1; 864 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2; 865 mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0; 866 mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1; 867 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds; 868 869 /* Data Strobes */ 870 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl; 871 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0; 872 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1; 873 if (width >= 32) { 874 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2; 875 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3; 876 } 877 if (width >= 64) { 878 mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4; 879 mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5; 880 mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6; 881 mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7; 882 } 883 884 /* Data */ 885 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode; 886 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds; 887 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds; 888 if (width >= 32) { 889 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds; 890 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds; 891 } 892 if (width >= 64) { 893 mx6_grp_iomux->grp_b4ds = grp->grp_b4ds; 894 mx6_grp_iomux->grp_b5ds = grp->grp_b5ds; 895 mx6_grp_iomux->grp_b6ds = grp->grp_b6ds; 896 mx6_grp_iomux->grp_b7ds = grp->grp_b7ds; 897 } 898 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0; 899 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1; 900 if (width >= 32) { 901 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2; 902 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3; 903 } 904 if (width >= 64) { 905 mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4; 906 mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5; 907 mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6; 908 mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7; 909 } 910 } 911 #endif 912 913 /* 914 * Configure mx6 mmdc registers based on: 915 * - board-specific memory configuration 916 * - board-specific calibration data 917 * - ddr3/lpddr2 chip details 918 * 919 * The various calculations here are derived from the Freescale 920 * 1. i.Mx6DQSDL DDR3 Script Aid spreadsheet (DOC-94917) designed to generate 921 * MMDC configuration registers based on memory system and memory chip 922 * parameters. 923 * 924 * 2. i.Mx6SL LPDDR2 Script Aid spreadsheet V0.04 designed to generate MMDC 925 * configuration registers based on memory system and memory chip 926 * parameters. 927 * 928 * The defaults here are those which were specified in the spreadsheet. 929 * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM 930 * and/or IMX6SLRM section titled MMDC initialization. 931 */ 932 #define MR(val, ba, cmd, cs1) \ 933 ((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba) 934 #define MMDC1(entry, value) do { \ 935 if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) \ 936 mmdc1->entry = value; \ 937 } while (0) 938 939 /* 940 * According JESD209-2B-LPDDR2: Table 103 941 * WL: write latency 942 */ 943 static int lpddr2_wl(uint32_t mem_speed) 944 { 945 switch (mem_speed) { 946 case 1066: 947 case 933: 948 return 4; 949 case 800: 950 return 3; 951 case 677: 952 case 533: 953 return 2; 954 case 400: 955 case 333: 956 return 1; 957 default: 958 puts("invalid memory speed\n"); 959 hang(); 960 } 961 962 return 0; 963 } 964 965 /* 966 * According JESD209-2B-LPDDR2: Table 103 967 * RL: read latency 968 */ 969 static int lpddr2_rl(uint32_t mem_speed) 970 { 971 switch (mem_speed) { 972 case 1066: 973 return 8; 974 case 933: 975 return 7; 976 case 800: 977 return 6; 978 case 677: 979 return 5; 980 case 533: 981 return 4; 982 case 400: 983 case 333: 984 return 3; 985 default: 986 puts("invalid memory speed\n"); 987 hang(); 988 } 989 990 return 0; 991 } 992 993 void mx6_lpddr2_cfg(const struct mx6_ddr_sysinfo *sysinfo, 994 const struct mx6_mmdc_calibration *calib, 995 const struct mx6_lpddr2_cfg *lpddr2_cfg) 996 { 997 volatile struct mmdc_p_regs *mmdc0; 998 u32 val; 999 u8 tcke, tcksrx, tcksre, trrd; 1000 u8 twl, txp, tfaw, tcl; 1001 u16 tras, twr, tmrd, trtp, twtr, trfc, txsr; 1002 u16 trcd_lp, trppb_lp, trpab_lp, trc_lp; 1003 u16 cs0_end; 1004 u8 coladdr; 1005 int clkper; /* clock period in picoseconds */ 1006 int clock; /* clock freq in mHz */ 1007 int cs; 1008 1009 /* only support 16/32 bits */ 1010 if (sysinfo->dsize > 1) 1011 hang(); 1012 1013 mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 1014 1015 clock = mxc_get_clock(MXC_DDR_CLK) / 1000000U; 1016 clkper = (1000 * 1000) / clock; /* pico seconds */ 1017 1018 twl = lpddr2_wl(lpddr2_cfg->mem_speed) - 1; 1019 1020 /* LPDDR2-S2 and LPDDR2-S4 have the same tRFC value. */ 1021 switch (lpddr2_cfg->density) { 1022 case 1: 1023 case 2: 1024 case 4: 1025 trfc = DIV_ROUND_UP(130000, clkper) - 1; 1026 txsr = DIV_ROUND_UP(140000, clkper) - 1; 1027 break; 1028 case 8: 1029 trfc = DIV_ROUND_UP(210000, clkper) - 1; 1030 txsr = DIV_ROUND_UP(220000, clkper) - 1; 1031 break; 1032 default: 1033 /* 1034 * 64Mb, 128Mb, 256Mb, 512Mb are not supported currently. 1035 */ 1036 hang(); 1037 break; 1038 } 1039 /* 1040 * txpdll, txpr, taonpd and taofpd are not relevant in LPDDR2 mode, 1041 * set them to 0. */ 1042 txp = DIV_ROUND_UP(7500, clkper) - 1; 1043 tcke = 3; 1044 if (lpddr2_cfg->mem_speed == 333) 1045 tfaw = DIV_ROUND_UP(60000, clkper) - 1; 1046 else 1047 tfaw = DIV_ROUND_UP(50000, clkper) - 1; 1048 trrd = DIV_ROUND_UP(10000, clkper) - 1; 1049 1050 /* tckesr for LPDDR2 */ 1051 tcksre = DIV_ROUND_UP(15000, clkper); 1052 tcksrx = tcksre; 1053 twr = DIV_ROUND_UP(15000, clkper) - 1; 1054 /* 1055 * tMRR: 2, tMRW: 5 1056 * tMRD should be set to max(tMRR, tMRW) 1057 */ 1058 tmrd = 5; 1059 tras = DIV_ROUND_UP(lpddr2_cfg->trasmin, clkper / 10) - 1; 1060 /* LPDDR2 mode use tRCD_LP filed in MDCFG3. */ 1061 trcd_lp = DIV_ROUND_UP(lpddr2_cfg->trcd_lp, clkper / 10) - 1; 1062 trc_lp = DIV_ROUND_UP(lpddr2_cfg->trasmin + lpddr2_cfg->trppb_lp, 1063 clkper / 10) - 1; 1064 trppb_lp = DIV_ROUND_UP(lpddr2_cfg->trppb_lp, clkper / 10) - 1; 1065 trpab_lp = DIV_ROUND_UP(lpddr2_cfg->trpab_lp, clkper / 10) - 1; 1066 /* To LPDDR2, CL in MDCFG0 refers to RL */ 1067 tcl = lpddr2_rl(lpddr2_cfg->mem_speed) - 3; 1068 twtr = DIV_ROUND_UP(7500, clkper) - 1; 1069 trtp = DIV_ROUND_UP(7500, clkper) - 1; 1070 1071 cs0_end = 4 * sysinfo->cs_density - 1; 1072 1073 debug("density:%d Gb (%d Gb per chip)\n", 1074 sysinfo->cs_density, lpddr2_cfg->density); 1075 debug("clock: %dMHz (%d ps)\n", clock, clkper); 1076 debug("memspd:%d\n", lpddr2_cfg->mem_speed); 1077 debug("trcd_lp=%d\n", trcd_lp); 1078 debug("trppb_lp=%d\n", trppb_lp); 1079 debug("trpab_lp=%d\n", trpab_lp); 1080 debug("trc_lp=%d\n", trc_lp); 1081 debug("tcke=%d\n", tcke); 1082 debug("tcksrx=%d\n", tcksrx); 1083 debug("tcksre=%d\n", tcksre); 1084 debug("trfc=%d\n", trfc); 1085 debug("txsr=%d\n", txsr); 1086 debug("txp=%d\n", txp); 1087 debug("tfaw=%d\n", tfaw); 1088 debug("tcl=%d\n", tcl); 1089 debug("tras=%d\n", tras); 1090 debug("twr=%d\n", twr); 1091 debug("tmrd=%d\n", tmrd); 1092 debug("twl=%d\n", twl); 1093 debug("trtp=%d\n", trtp); 1094 debug("twtr=%d\n", twtr); 1095 debug("trrd=%d\n", trrd); 1096 debug("cs0_end=%d\n", cs0_end); 1097 debug("ncs=%d\n", sysinfo->ncs); 1098 1099 /* 1100 * board-specific configuration: 1101 * These values are determined empirically and vary per board layout 1102 */ 1103 mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0; 1104 mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1; 1105 mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0; 1106 mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1; 1107 mmdc0->mprddlctl = calib->p0_mprddlctl; 1108 mmdc0->mpwrdlctl = calib->p0_mpwrdlctl; 1109 mmdc0->mpzqlp2ctl = calib->mpzqlp2ctl; 1110 1111 /* Read data DQ Byte0-3 delay */ 1112 mmdc0->mprddqby0dl = 0x33333333; 1113 mmdc0->mprddqby1dl = 0x33333333; 1114 if (sysinfo->dsize > 0) { 1115 mmdc0->mprddqby2dl = 0x33333333; 1116 mmdc0->mprddqby3dl = 0x33333333; 1117 } 1118 1119 /* Write data DQ Byte0-3 delay */ 1120 mmdc0->mpwrdqby0dl = 0xf3333333; 1121 mmdc0->mpwrdqby1dl = 0xf3333333; 1122 if (sysinfo->dsize > 0) { 1123 mmdc0->mpwrdqby2dl = 0xf3333333; 1124 mmdc0->mpwrdqby3dl = 0xf3333333; 1125 } 1126 1127 /* 1128 * In LPDDR2 mode this register should be cleared, 1129 * so no termination will be activated. 1130 */ 1131 mmdc0->mpodtctrl = 0; 1132 1133 /* complete calibration */ 1134 val = (1 << 11); /* Force measurement on delay-lines */ 1135 mmdc0->mpmur0 = val; 1136 1137 /* Step 1: configuration request */ 1138 mmdc0->mdscr = (u32)(1 << 15); /* config request */ 1139 1140 /* Step 2: Timing configuration */ 1141 mmdc0->mdcfg0 = (trfc << 24) | (txsr << 16) | (txp << 13) | 1142 (tfaw << 4) | tcl; 1143 mmdc0->mdcfg1 = (tras << 16) | (twr << 9) | (tmrd << 5) | twl; 1144 mmdc0->mdcfg2 = (trtp << 6) | (twtr << 3) | trrd; 1145 mmdc0->mdcfg3lp = (trc_lp << 16) | (trcd_lp << 8) | 1146 (trppb_lp << 4) | trpab_lp; 1147 mmdc0->mdotc = 0; 1148 1149 mmdc0->mdasp = cs0_end; /* CS addressing */ 1150 1151 /* Step 3: Configure DDR type */ 1152 mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) | 1153 (sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) | 1154 (sysinfo->ralat << 6) | (1 << 3); 1155 1156 /* Step 4: Configure delay while leaving reset */ 1157 mmdc0->mdor = (sysinfo->sde_to_rst << 8) | 1158 (sysinfo->rst_to_cke << 0); 1159 1160 /* Step 5: Configure DDR physical parameters (density and burst len) */ 1161 coladdr = lpddr2_cfg->coladdr; 1162 if (lpddr2_cfg->coladdr == 8) /* 8-bit COL is 0x3 */ 1163 coladdr += 4; 1164 else if (lpddr2_cfg->coladdr == 12) /* 12-bit COL is 0x4 */ 1165 coladdr += 1; 1166 mmdc0->mdctl = (lpddr2_cfg->rowaddr - 11) << 24 | /* ROW */ 1167 (coladdr - 9) << 20 | /* COL */ 1168 (0 << 19) | /* Burst Length = 4 for LPDDR2 */ 1169 (sysinfo->dsize << 16); /* DDR data bus size */ 1170 1171 /* Step 6: Perform ZQ calibration */ 1172 val = 0xa1390003; /* one-time HW ZQ calib */ 1173 mmdc0->mpzqhwctrl = val; 1174 1175 /* Step 7: Enable MMDC with desired chip select */ 1176 mmdc0->mdctl |= (1 << 31) | /* SDE_0 for CS0 */ 1177 ((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */ 1178 1179 /* Step 8: Write Mode Registers to Init LPDDR2 devices */ 1180 for (cs = 0; cs < sysinfo->ncs; cs++) { 1181 /* MR63: reset */ 1182 mmdc0->mdscr = MR(63, 0, 3, cs); 1183 /* MR10: calibration, 1184 * 0xff is calibration command after intilization. 1185 */ 1186 val = 0xA | (0xff << 8); 1187 mmdc0->mdscr = MR(val, 0, 3, cs); 1188 /* MR1 */ 1189 val = 0x1 | (0x82 << 8); 1190 mmdc0->mdscr = MR(val, 0, 3, cs); 1191 /* MR2 */ 1192 val = 0x2 | (0x04 << 8); 1193 mmdc0->mdscr = MR(val, 0, 3, cs); 1194 /* MR3 */ 1195 val = 0x3 | (0x02 << 8); 1196 mmdc0->mdscr = MR(val, 0, 3, cs); 1197 } 1198 1199 /* Step 10: Power down control and self-refresh */ 1200 mmdc0->mdpdc = (tcke & 0x7) << 16 | 1201 5 << 12 | /* PWDT_1: 256 cycles */ 1202 5 << 8 | /* PWDT_0: 256 cycles */ 1203 1 << 6 | /* BOTH_CS_PD */ 1204 (tcksrx & 0x7) << 3 | 1205 (tcksre & 0x7); 1206 mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */ 1207 1208 /* Step 11: Configure ZQ calibration: one-time and periodic 1ms */ 1209 val = 0xa1310003; 1210 mmdc0->mpzqhwctrl = val; 1211 1212 /* Step 12: Configure and activate periodic refresh */ 1213 mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11); 1214 1215 /* Step 13: Deassert config request - init complete */ 1216 mmdc0->mdscr = 0x00000000; 1217 1218 /* wait for auto-ZQ calibration to complete */ 1219 mdelay(1); 1220 } 1221 1222 void mx6_ddr3_cfg(const struct mx6_ddr_sysinfo *sysinfo, 1223 const struct mx6_mmdc_calibration *calib, 1224 const struct mx6_ddr3_cfg *ddr3_cfg) 1225 { 1226 volatile struct mmdc_p_regs *mmdc0; 1227 volatile struct mmdc_p_regs *mmdc1; 1228 u32 val; 1229 u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd; 1230 u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl; 1231 u8 todt_idle_off = 0x4; /* from DDR3 Script Aid spreadsheet */ 1232 u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr; 1233 u16 cs0_end; 1234 u16 tdllk = 0x1ff; /* DLL locking time: 512 cycles (JEDEC DDR3) */ 1235 u8 coladdr; 1236 int clkper; /* clock period in picoseconds */ 1237 int clock; /* clock freq in MHz */ 1238 int cs; 1239 u16 mem_speed = ddr3_cfg->mem_speed; 1240 1241 mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 1242 if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) 1243 mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 1244 1245 /* Limit mem_speed for MX6D/MX6Q */ 1246 if (is_mx6dq() || is_mx6dqp()) { 1247 if (mem_speed > 1066) 1248 mem_speed = 1066; /* 1066 MT/s */ 1249 1250 tcwl = 4; 1251 } 1252 /* Limit mem_speed for MX6S/MX6DL */ 1253 else { 1254 if (mem_speed > 800) 1255 mem_speed = 800; /* 800 MT/s */ 1256 1257 tcwl = 3; 1258 } 1259 1260 clock = mem_speed / 2; 1261 /* 1262 * Data rate of 1066 MT/s requires 533 MHz DDR3 clock, but MX6D/Q supports 1263 * up to 528 MHz, so reduce the clock to fit chip specs 1264 */ 1265 if (is_mx6dq() || is_mx6dqp()) { 1266 if (clock > 528) 1267 clock = 528; /* 528 MHz */ 1268 } 1269 1270 clkper = (1000 * 1000) / clock; /* pico seconds */ 1271 todtlon = tcwl; 1272 taxpd = tcwl; 1273 tanpd = tcwl; 1274 1275 switch (ddr3_cfg->density) { 1276 case 1: /* 1Gb per chip */ 1277 trfc = DIV_ROUND_UP(110000, clkper) - 1; 1278 txs = DIV_ROUND_UP(120000, clkper) - 1; 1279 break; 1280 case 2: /* 2Gb per chip */ 1281 trfc = DIV_ROUND_UP(160000, clkper) - 1; 1282 txs = DIV_ROUND_UP(170000, clkper) - 1; 1283 break; 1284 case 4: /* 4Gb per chip */ 1285 trfc = DIV_ROUND_UP(260000, clkper) - 1; 1286 txs = DIV_ROUND_UP(270000, clkper) - 1; 1287 break; 1288 case 8: /* 8Gb per chip */ 1289 trfc = DIV_ROUND_UP(350000, clkper) - 1; 1290 txs = DIV_ROUND_UP(360000, clkper) - 1; 1291 break; 1292 default: 1293 /* invalid density */ 1294 puts("invalid chip density\n"); 1295 hang(); 1296 break; 1297 } 1298 txpr = txs; 1299 1300 switch (mem_speed) { 1301 case 800: 1302 txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1; 1303 tcke = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1; 1304 if (ddr3_cfg->pagesz == 1) { 1305 tfaw = DIV_ROUND_UP(40000, clkper) - 1; 1306 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1; 1307 } else { 1308 tfaw = DIV_ROUND_UP(50000, clkper) - 1; 1309 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1; 1310 } 1311 break; 1312 case 1066: 1313 txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1; 1314 tcke = DIV_ROUND_UP(max(3 * clkper, 5625), clkper) - 1; 1315 if (ddr3_cfg->pagesz == 1) { 1316 tfaw = DIV_ROUND_UP(37500, clkper) - 1; 1317 trrd = DIV_ROUND_UP(max(4 * clkper, 7500), clkper) - 1; 1318 } else { 1319 tfaw = DIV_ROUND_UP(50000, clkper) - 1; 1320 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1; 1321 } 1322 break; 1323 default: 1324 puts("invalid memory speed\n"); 1325 hang(); 1326 break; 1327 } 1328 txpdll = DIV_ROUND_UP(max(10 * clkper, 24000), clkper) - 1; 1329 tcksre = DIV_ROUND_UP(max(5 * clkper, 10000), clkper); 1330 taonpd = DIV_ROUND_UP(2000, clkper) - 1; 1331 tcksrx = tcksre; 1332 taofpd = taonpd; 1333 twr = DIV_ROUND_UP(15000, clkper) - 1; 1334 tmrd = DIV_ROUND_UP(max(12 * clkper, 15000), clkper) - 1; 1335 trc = DIV_ROUND_UP(ddr3_cfg->trcmin, clkper / 10) - 1; 1336 tras = DIV_ROUND_UP(ddr3_cfg->trasmin, clkper / 10) - 1; 1337 tcl = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 3; 1338 trp = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 1; 1339 twtr = ROUND(max(4 * clkper, 7500) / clkper, 1) - 1; 1340 trcd = trp; 1341 trtp = twtr; 1342 cs0_end = 4 * sysinfo->cs_density - 1; 1343 1344 debug("density:%d Gb (%d Gb per chip)\n", 1345 sysinfo->cs_density, ddr3_cfg->density); 1346 debug("clock: %dMHz (%d ps)\n", clock, clkper); 1347 debug("memspd:%d\n", mem_speed); 1348 debug("tcke=%d\n", tcke); 1349 debug("tcksrx=%d\n", tcksrx); 1350 debug("tcksre=%d\n", tcksre); 1351 debug("taofpd=%d\n", taofpd); 1352 debug("taonpd=%d\n", taonpd); 1353 debug("todtlon=%d\n", todtlon); 1354 debug("tanpd=%d\n", tanpd); 1355 debug("taxpd=%d\n", taxpd); 1356 debug("trfc=%d\n", trfc); 1357 debug("txs=%d\n", txs); 1358 debug("txp=%d\n", txp); 1359 debug("txpdll=%d\n", txpdll); 1360 debug("tfaw=%d\n", tfaw); 1361 debug("tcl=%d\n", tcl); 1362 debug("trcd=%d\n", trcd); 1363 debug("trp=%d\n", trp); 1364 debug("trc=%d\n", trc); 1365 debug("tras=%d\n", tras); 1366 debug("twr=%d\n", twr); 1367 debug("tmrd=%d\n", tmrd); 1368 debug("tcwl=%d\n", tcwl); 1369 debug("tdllk=%d\n", tdllk); 1370 debug("trtp=%d\n", trtp); 1371 debug("twtr=%d\n", twtr); 1372 debug("trrd=%d\n", trrd); 1373 debug("txpr=%d\n", txpr); 1374 debug("cs0_end=%d\n", cs0_end); 1375 debug("ncs=%d\n", sysinfo->ncs); 1376 debug("Rtt_wr=%d\n", sysinfo->rtt_wr); 1377 debug("Rtt_nom=%d\n", sysinfo->rtt_nom); 1378 debug("SRT=%d\n", ddr3_cfg->SRT); 1379 debug("twr=%d\n", twr); 1380 1381 /* 1382 * board-specific configuration: 1383 * These values are determined empirically and vary per board layout 1384 * see: 1385 * appnote, ddr3 spreadsheet 1386 */ 1387 mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0; 1388 mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1; 1389 mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0; 1390 mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1; 1391 mmdc0->mprddlctl = calib->p0_mprddlctl; 1392 mmdc0->mpwrdlctl = calib->p0_mpwrdlctl; 1393 if (sysinfo->dsize > 1) { 1394 MMDC1(mpwldectrl0, calib->p1_mpwldectrl0); 1395 MMDC1(mpwldectrl1, calib->p1_mpwldectrl1); 1396 MMDC1(mpdgctrl0, calib->p1_mpdgctrl0); 1397 MMDC1(mpdgctrl1, calib->p1_mpdgctrl1); 1398 MMDC1(mprddlctl, calib->p1_mprddlctl); 1399 MMDC1(mpwrdlctl, calib->p1_mpwrdlctl); 1400 } 1401 1402 /* Read data DQ Byte0-3 delay */ 1403 mmdc0->mprddqby0dl = 0x33333333; 1404 mmdc0->mprddqby1dl = 0x33333333; 1405 if (sysinfo->dsize > 0) { 1406 mmdc0->mprddqby2dl = 0x33333333; 1407 mmdc0->mprddqby3dl = 0x33333333; 1408 } 1409 1410 if (sysinfo->dsize > 1) { 1411 MMDC1(mprddqby0dl, 0x33333333); 1412 MMDC1(mprddqby1dl, 0x33333333); 1413 MMDC1(mprddqby2dl, 0x33333333); 1414 MMDC1(mprddqby3dl, 0x33333333); 1415 } 1416 1417 /* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */ 1418 val = (sysinfo->rtt_nom == 2) ? 0x00011117 : 0x00022227; 1419 mmdc0->mpodtctrl = val; 1420 if (sysinfo->dsize > 1) 1421 MMDC1(mpodtctrl, val); 1422 1423 /* complete calibration */ 1424 val = (1 << 11); /* Force measurement on delay-lines */ 1425 mmdc0->mpmur0 = val; 1426 if (sysinfo->dsize > 1) 1427 MMDC1(mpmur0, val); 1428 1429 /* Step 1: configuration request */ 1430 mmdc0->mdscr = (u32)(1 << 15); /* config request */ 1431 1432 /* Step 2: Timing configuration */ 1433 mmdc0->mdcfg0 = (trfc << 24) | (txs << 16) | (txp << 13) | 1434 (txpdll << 9) | (tfaw << 4) | tcl; 1435 mmdc0->mdcfg1 = (trcd << 29) | (trp << 26) | (trc << 21) | 1436 (tras << 16) | (1 << 15) /* trpa */ | 1437 (twr << 9) | (tmrd << 5) | tcwl; 1438 mmdc0->mdcfg2 = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd; 1439 mmdc0->mdotc = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) | 1440 (taxpd << 16) | (todtlon << 12) | (todt_idle_off << 4); 1441 mmdc0->mdasp = cs0_end; /* CS addressing */ 1442 1443 /* Step 3: Configure DDR type */ 1444 mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) | 1445 (sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) | 1446 (sysinfo->ralat << 6); 1447 1448 /* Step 4: Configure delay while leaving reset */ 1449 mmdc0->mdor = (txpr << 16) | (sysinfo->sde_to_rst << 8) | 1450 (sysinfo->rst_to_cke << 0); 1451 1452 /* Step 5: Configure DDR physical parameters (density and burst len) */ 1453 coladdr = ddr3_cfg->coladdr; 1454 if (ddr3_cfg->coladdr == 8) /* 8-bit COL is 0x3 */ 1455 coladdr += 4; 1456 else if (ddr3_cfg->coladdr == 12) /* 12-bit COL is 0x4 */ 1457 coladdr += 1; 1458 mmdc0->mdctl = (ddr3_cfg->rowaddr - 11) << 24 | /* ROW */ 1459 (coladdr - 9) << 20 | /* COL */ 1460 (1 << 19) | /* Burst Length = 8 for DDR3 */ 1461 (sysinfo->dsize << 16); /* DDR data bus size */ 1462 1463 /* Step 6: Perform ZQ calibration */ 1464 val = 0xa1390001; /* one-time HW ZQ calib */ 1465 mmdc0->mpzqhwctrl = val; 1466 if (sysinfo->dsize > 1) 1467 MMDC1(mpzqhwctrl, val); 1468 1469 /* Step 7: Enable MMDC with desired chip select */ 1470 mmdc0->mdctl |= (1 << 31) | /* SDE_0 for CS0 */ 1471 ((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */ 1472 1473 /* Step 8: Write Mode Registers to Init DDR3 devices */ 1474 for (cs = 0; cs < sysinfo->ncs; cs++) { 1475 /* MR2 */ 1476 val = (sysinfo->rtt_wr & 3) << 9 | (ddr3_cfg->SRT & 1) << 7 | 1477 ((tcwl - 3) & 3) << 3; 1478 debug("MR2 CS%d: 0x%08x\n", cs, (u32)MR(val, 2, 3, cs)); 1479 mmdc0->mdscr = MR(val, 2, 3, cs); 1480 /* MR3 */ 1481 debug("MR3 CS%d: 0x%08x\n", cs, (u32)MR(0, 3, 3, cs)); 1482 mmdc0->mdscr = MR(0, 3, 3, cs); 1483 /* MR1 */ 1484 val = ((sysinfo->rtt_nom & 1) ? 1 : 0) << 2 | 1485 ((sysinfo->rtt_nom & 2) ? 1 : 0) << 6; 1486 debug("MR1 CS%d: 0x%08x\n", cs, (u32)MR(val, 1, 3, cs)); 1487 mmdc0->mdscr = MR(val, 1, 3, cs); 1488 /* MR0 */ 1489 val = ((tcl - 1) << 4) | /* CAS */ 1490 (1 << 8) | /* DLL Reset */ 1491 ((twr - 3) << 9) | /* Write Recovery */ 1492 (sysinfo->pd_fast_exit << 12); /* Precharge PD PLL on */ 1493 debug("MR0 CS%d: 0x%08x\n", cs, (u32)MR(val, 0, 3, cs)); 1494 mmdc0->mdscr = MR(val, 0, 3, cs); 1495 /* ZQ calibration */ 1496 val = (1 << 10); 1497 mmdc0->mdscr = MR(val, 0, 4, cs); 1498 } 1499 1500 /* Step 10: Power down control and self-refresh */ 1501 mmdc0->mdpdc = (tcke & 0x7) << 16 | 1502 5 << 12 | /* PWDT_1: 256 cycles */ 1503 5 << 8 | /* PWDT_0: 256 cycles */ 1504 1 << 6 | /* BOTH_CS_PD */ 1505 (tcksrx & 0x7) << 3 | 1506 (tcksre & 0x7); 1507 if (!sysinfo->pd_fast_exit) 1508 mmdc0->mdpdc |= (1 << 7); /* SLOW_PD */ 1509 mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */ 1510 1511 /* Step 11: Configure ZQ calibration: one-time and periodic 1ms */ 1512 val = 0xa1390003; 1513 mmdc0->mpzqhwctrl = val; 1514 if (sysinfo->dsize > 1) 1515 MMDC1(mpzqhwctrl, val); 1516 1517 /* Step 12: Configure and activate periodic refresh */ 1518 mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11); 1519 1520 /* Step 13: Deassert config request - init complete */ 1521 mmdc0->mdscr = 0x00000000; 1522 1523 /* wait for auto-ZQ calibration to complete */ 1524 mdelay(1); 1525 } 1526 1527 void mmdc_read_calibration(struct mx6_ddr_sysinfo const *sysinfo, 1528 struct mx6_mmdc_calibration *calib) 1529 { 1530 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 1531 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 1532 1533 calib->p0_mpwldectrl0 = readl(&mmdc0->mpwldectrl0); 1534 calib->p0_mpwldectrl1 = readl(&mmdc0->mpwldectrl1); 1535 calib->p0_mpdgctrl0 = readl(&mmdc0->mpdgctrl0); 1536 calib->p0_mpdgctrl1 = readl(&mmdc0->mpdgctrl1); 1537 calib->p0_mprddlctl = readl(&mmdc0->mprddlctl); 1538 calib->p0_mpwrdlctl = readl(&mmdc0->mpwrdlctl); 1539 1540 if (sysinfo->dsize == 2) { 1541 calib->p1_mpwldectrl0 = readl(&mmdc1->mpwldectrl0); 1542 calib->p1_mpwldectrl1 = readl(&mmdc1->mpwldectrl1); 1543 calib->p1_mpdgctrl0 = readl(&mmdc1->mpdgctrl0); 1544 calib->p1_mpdgctrl1 = readl(&mmdc1->mpdgctrl1); 1545 calib->p1_mprddlctl = readl(&mmdc1->mprddlctl); 1546 calib->p1_mpwrdlctl = readl(&mmdc1->mpwrdlctl); 1547 } 1548 } 1549 1550 void mx6_dram_cfg(const struct mx6_ddr_sysinfo *sysinfo, 1551 const struct mx6_mmdc_calibration *calib, 1552 const void *ddr_cfg) 1553 { 1554 if (sysinfo->ddr_type == DDR_TYPE_DDR3) { 1555 mx6_ddr3_cfg(sysinfo, calib, ddr_cfg); 1556 } else if (sysinfo->ddr_type == DDR_TYPE_LPDDR2) { 1557 mx6_lpddr2_cfg(sysinfo, calib, ddr_cfg); 1558 } else { 1559 puts("Unsupported ddr type\n"); 1560 hang(); 1561 } 1562 } 1563