1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2014 Gateworks Corporation 4 * Author: Tim Harvey <tharvey@gateworks.com> 5 */ 6 7 #include <common.h> 8 #include <linux/types.h> 9 #include <asm/arch/clock.h> 10 #include <asm/arch/mx6-ddr.h> 11 #include <asm/arch/sys_proto.h> 12 #include <asm/io.h> 13 #include <asm/types.h> 14 #include <wait_bit.h> 15 16 #if defined(CONFIG_MX6_DDRCAL) 17 static void reset_read_data_fifos(void) 18 { 19 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 20 21 /* Reset data FIFOs twice. */ 22 setbits_le32(&mmdc0->mpdgctrl0, 1 << 31); 23 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0); 24 25 setbits_le32(&mmdc0->mpdgctrl0, 1 << 31); 26 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0); 27 } 28 29 static void precharge_all(const bool cs0_enable, const bool cs1_enable) 30 { 31 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 32 33 /* 34 * Issue the Precharge-All command to the DDR device for both 35 * chip selects. Note, CON_REQ bit should also remain set. If 36 * only using one chip select, then precharge only the desired 37 * chip select. 38 */ 39 if (cs0_enable) { /* CS0 */ 40 writel(0x04008050, &mmdc0->mdscr); 41 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0); 42 } 43 44 if (cs1_enable) { /* CS1 */ 45 writel(0x04008058, &mmdc0->mdscr); 46 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0); 47 } 48 } 49 50 static void force_delay_measurement(int bus_size) 51 { 52 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 53 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 54 55 writel(0x800, &mmdc0->mpmur0); 56 if (bus_size == 0x2) 57 writel(0x800, &mmdc1->mpmur0); 58 } 59 60 static void modify_dg_result(u32 *reg_st0, u32 *reg_st1, u32 *reg_ctrl) 61 { 62 u32 dg_tmp_val, dg_dl_abs_offset, dg_hc_del, val_ctrl; 63 64 /* 65 * DQS gating absolute offset should be modified from reflecting 66 * (HW_DG_LOWx + HW_DG_UPx)/2 to reflecting (HW_DG_UPx - 0x80) 67 */ 68 69 val_ctrl = readl(reg_ctrl); 70 val_ctrl &= 0xf0000000; 71 72 dg_tmp_val = ((readl(reg_st0) & 0x07ff0000) >> 16) - 0xc0; 73 dg_dl_abs_offset = dg_tmp_val & 0x7f; 74 dg_hc_del = (dg_tmp_val & 0x780) << 1; 75 76 val_ctrl |= dg_dl_abs_offset + dg_hc_del; 77 78 dg_tmp_val = ((readl(reg_st1) & 0x07ff0000) >> 16) - 0xc0; 79 dg_dl_abs_offset = dg_tmp_val & 0x7f; 80 dg_hc_del = (dg_tmp_val & 0x780) << 1; 81 82 val_ctrl |= (dg_dl_abs_offset + dg_hc_del) << 16; 83 84 writel(val_ctrl, reg_ctrl); 85 } 86 87 static void correct_mpwldectr_result(void *reg) 88 { 89 /* Limit is 200/256 of CK, which is WL_HC_DELx | 0x48. */ 90 const unsigned int limit = 0x148; 91 u32 val = readl(reg); 92 u32 old = val; 93 94 if ((val & 0x17f) > limit) 95 val &= 0xffff << 16; 96 97 if (((val >> 16) & 0x17f) > limit) 98 val &= 0xffff; 99 100 if (old != val) 101 writel(val, reg); 102 } 103 104 int mmdc_do_write_level_calibration(struct mx6_ddr_sysinfo const *sysinfo) 105 { 106 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 107 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 108 u32 esdmisc_val, zq_val; 109 u32 errors = 0; 110 u32 ldectrl[4] = {0}; 111 u32 ddr_mr1 = 0x4; 112 u32 rwalat_max; 113 114 /* 115 * Stash old values in case calibration fails, 116 * we need to restore them 117 */ 118 ldectrl[0] = readl(&mmdc0->mpwldectrl0); 119 ldectrl[1] = readl(&mmdc0->mpwldectrl1); 120 if (sysinfo->dsize == 2) { 121 ldectrl[2] = readl(&mmdc1->mpwldectrl0); 122 ldectrl[3] = readl(&mmdc1->mpwldectrl1); 123 } 124 125 /* disable DDR logic power down timer */ 126 clrbits_le32(&mmdc0->mdpdc, 0xff00); 127 128 /* disable Adopt power down timer */ 129 setbits_le32(&mmdc0->mapsr, 0x1); 130 131 debug("Starting write leveling calibration.\n"); 132 133 /* 134 * 2. disable auto refresh and ZQ calibration 135 * before proceeding with Write Leveling calibration 136 */ 137 esdmisc_val = readl(&mmdc0->mdref); 138 writel(0x0000C000, &mmdc0->mdref); 139 zq_val = readl(&mmdc0->mpzqhwctrl); 140 writel(zq_val & ~0x3, &mmdc0->mpzqhwctrl); 141 142 /* 3. increase walat and ralat to maximum */ 143 rwalat_max = (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17); 144 setbits_le32(&mmdc0->mdmisc, rwalat_max); 145 if (sysinfo->dsize == 2) 146 setbits_le32(&mmdc1->mdmisc, rwalat_max); 147 /* 148 * 4 & 5. Configure the external DDR device to enter write-leveling 149 * mode through Load Mode Register command. 150 * Register setting: 151 * Bits[31:16] MR1 value (0x0080 write leveling enable) 152 * Bit[9] set WL_EN to enable MMDC DQS output 153 * Bits[6:4] set CMD bits for Load Mode Register programming 154 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming 155 */ 156 writel(0x00808231, &mmdc0->mdscr); 157 158 /* 6. Activate automatic calibration by setting MPWLGCR[HW_WL_EN] */ 159 writel(0x00000001, &mmdc0->mpwlgcr); 160 161 /* 162 * 7. Upon completion of this process the MMDC de-asserts 163 * the MPWLGCR[HW_WL_EN] 164 */ 165 wait_for_bit_le32(&mmdc0->mpwlgcr, 1 << 0, 0, 100, 0); 166 167 /* 168 * 8. check for any errors: check both PHYs for x64 configuration, 169 * if x32, check only PHY0 170 */ 171 if (readl(&mmdc0->mpwlgcr) & 0x00000F00) 172 errors |= 1; 173 if (sysinfo->dsize == 2) 174 if (readl(&mmdc1->mpwlgcr) & 0x00000F00) 175 errors |= 2; 176 177 debug("Ending write leveling calibration. Error mask: 0x%x\n", errors); 178 179 /* check to see if cal failed */ 180 if ((readl(&mmdc0->mpwldectrl0) == 0x001F001F) && 181 (readl(&mmdc0->mpwldectrl1) == 0x001F001F) && 182 ((sysinfo->dsize < 2) || 183 ((readl(&mmdc1->mpwldectrl0) == 0x001F001F) && 184 (readl(&mmdc1->mpwldectrl1) == 0x001F001F)))) { 185 debug("Cal seems to have soft-failed due to memory not supporting write leveling on all channels. Restoring original write leveling values.\n"); 186 writel(ldectrl[0], &mmdc0->mpwldectrl0); 187 writel(ldectrl[1], &mmdc0->mpwldectrl1); 188 if (sysinfo->dsize == 2) { 189 writel(ldectrl[2], &mmdc1->mpwldectrl0); 190 writel(ldectrl[3], &mmdc1->mpwldectrl1); 191 } 192 errors |= 4; 193 } 194 195 correct_mpwldectr_result(&mmdc0->mpwldectrl0); 196 correct_mpwldectr_result(&mmdc0->mpwldectrl1); 197 if (sysinfo->dsize == 2) { 198 correct_mpwldectr_result(&mmdc1->mpwldectrl0); 199 correct_mpwldectr_result(&mmdc1->mpwldectrl1); 200 } 201 202 /* 203 * User should issue MRS command to exit write leveling mode 204 * through Load Mode Register command 205 * Register setting: 206 * Bits[31:16] MR1 value "ddr_mr1" value from initialization 207 * Bit[9] clear WL_EN to disable MMDC DQS output 208 * Bits[6:4] set CMD bits for Load Mode Register programming 209 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming 210 */ 211 writel((ddr_mr1 << 16) + 0x8031, &mmdc0->mdscr); 212 213 /* re-enable auto refresh and zq cal */ 214 writel(esdmisc_val, &mmdc0->mdref); 215 writel(zq_val, &mmdc0->mpzqhwctrl); 216 217 debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n", 218 readl(&mmdc0->mpwldectrl0)); 219 debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n", 220 readl(&mmdc0->mpwldectrl1)); 221 if (sysinfo->dsize == 2) { 222 debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n", 223 readl(&mmdc1->mpwldectrl0)); 224 debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n", 225 readl(&mmdc1->mpwldectrl1)); 226 } 227 228 /* We must force a readback of these values, to get them to stick */ 229 readl(&mmdc0->mpwldectrl0); 230 readl(&mmdc0->mpwldectrl1); 231 if (sysinfo->dsize == 2) { 232 readl(&mmdc1->mpwldectrl0); 233 readl(&mmdc1->mpwldectrl1); 234 } 235 236 /* enable DDR logic power down timer: */ 237 setbits_le32(&mmdc0->mdpdc, 0x00005500); 238 239 /* Enable Adopt power down timer: */ 240 clrbits_le32(&mmdc0->mapsr, 0x1); 241 242 /* Clear CON_REQ */ 243 writel(0, &mmdc0->mdscr); 244 245 return errors; 246 } 247 248 int mmdc_do_dqs_calibration(struct mx6_ddr_sysinfo const *sysinfo) 249 { 250 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 251 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 252 struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux = 253 (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE; 254 bool cs0_enable; 255 bool cs1_enable; 256 bool cs0_enable_initial; 257 bool cs1_enable_initial; 258 u32 esdmisc_val; 259 u32 temp_ref; 260 u32 pddword = 0x00ffff00; /* best so far, place into MPPDCMPR1 */ 261 u32 errors = 0; 262 u32 initdelay = 0x40404040; 263 264 /* check to see which chip selects are enabled */ 265 cs0_enable_initial = readl(&mmdc0->mdctl) & 0x80000000; 266 cs1_enable_initial = readl(&mmdc0->mdctl) & 0x40000000; 267 268 /* disable DDR logic power down timer: */ 269 clrbits_le32(&mmdc0->mdpdc, 0xff00); 270 271 /* disable Adopt power down timer: */ 272 setbits_le32(&mmdc0->mapsr, 0x1); 273 274 /* set DQS pull ups */ 275 setbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000); 276 setbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000); 277 setbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000); 278 setbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000); 279 setbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000); 280 setbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000); 281 setbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000); 282 setbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000); 283 284 /* Save old RALAT and WALAT values */ 285 esdmisc_val = readl(&mmdc0->mdmisc); 286 287 setbits_le32(&mmdc0->mdmisc, 288 (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17)); 289 290 /* Disable auto refresh before proceeding with calibration */ 291 temp_ref = readl(&mmdc0->mdref); 292 writel(0x0000c000, &mmdc0->mdref); 293 294 /* 295 * Per the ref manual, issue one refresh cycle MDSCR[CMD]= 0x2, 296 * this also sets the CON_REQ bit. 297 */ 298 if (cs0_enable_initial) 299 writel(0x00008020, &mmdc0->mdscr); 300 if (cs1_enable_initial) 301 writel(0x00008028, &mmdc0->mdscr); 302 303 /* poll to make sure the con_ack bit was asserted */ 304 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0); 305 306 /* 307 * Check MDMISC register CALIB_PER_CS to see which CS calibration 308 * is targeted to (under normal cases, it should be cleared 309 * as this is the default value, indicating calibration is directed 310 * to CS0). 311 * Disable the other chip select not being target for calibration 312 * to avoid any potential issues. This will get re-enabled at end 313 * of calibration. 314 */ 315 if ((readl(&mmdc0->mdmisc) & 0x00100000) == 0) 316 clrbits_le32(&mmdc0->mdctl, 1 << 30); /* clear SDE_1 */ 317 else 318 clrbits_le32(&mmdc0->mdctl, 1 << 31); /* clear SDE_0 */ 319 320 /* 321 * Check to see which chip selects are now enabled for 322 * the remainder of the calibration. 323 */ 324 cs0_enable = readl(&mmdc0->mdctl) & 0x80000000; 325 cs1_enable = readl(&mmdc0->mdctl) & 0x40000000; 326 327 precharge_all(cs0_enable, cs1_enable); 328 329 /* Write the pre-defined value into MPPDCMPR1 */ 330 writel(pddword, &mmdc0->mppdcmpr1); 331 332 /* 333 * Issue a write access to the external DDR device by setting 334 * the bit SW_DUMMY_WR (bit 0) in the MPSWDAR0 and then poll 335 * this bit until it clears to indicate completion of the write access. 336 */ 337 setbits_le32(&mmdc0->mpswdar0, 1); 338 wait_for_bit_le32(&mmdc0->mpswdar0, 1 << 0, 0, 100, 0); 339 340 /* Set the RD_DL_ABS# bits to their default values 341 * (will be calibrated later in the read delay-line calibration). 342 * Both PHYs for x64 configuration, if x32, do only PHY0. 343 */ 344 writel(initdelay, &mmdc0->mprddlctl); 345 if (sysinfo->dsize == 0x2) 346 writel(initdelay, &mmdc1->mprddlctl); 347 348 /* Force a measurment, for previous delay setup to take effect. */ 349 force_delay_measurement(sysinfo->dsize); 350 351 /* 352 * *************************** 353 * Read DQS Gating calibration 354 * *************************** 355 */ 356 debug("Starting Read DQS Gating calibration.\n"); 357 358 /* 359 * Reset the read data FIFOs (two resets); only need to issue reset 360 * to PHY0 since in x64 mode, the reset will also go to PHY1. 361 */ 362 reset_read_data_fifos(); 363 364 /* 365 * Start the automatic read DQS gating calibration process by 366 * asserting MPDGCTRL0[HW_DG_EN] and MPDGCTRL0[DG_CMP_CYC] 367 * and then poll MPDGCTRL0[HW_DG_EN]] until this bit clears 368 * to indicate completion. 369 * Also, ensure that MPDGCTRL0[HW_DG_ERR] is clear to indicate 370 * no errors were seen during calibration. 371 */ 372 373 /* 374 * Set bit 30: chooses option to wait 32 cycles instead of 375 * 16 before comparing read data. 376 */ 377 setbits_le32(&mmdc0->mpdgctrl0, 1 << 30); 378 if (sysinfo->dsize == 2) 379 setbits_le32(&mmdc1->mpdgctrl0, 1 << 30); 380 381 /* Set bit 28 to start automatic read DQS gating calibration */ 382 setbits_le32(&mmdc0->mpdgctrl0, 5 << 28); 383 384 /* Poll for completion. MPDGCTRL0[HW_DG_EN] should be 0 */ 385 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 28, 0, 100, 0); 386 387 /* 388 * Check to see if any errors were encountered during calibration 389 * (check MPDGCTRL0[HW_DG_ERR]). 390 * Check both PHYs for x64 configuration, if x32, check only PHY0. 391 */ 392 if (readl(&mmdc0->mpdgctrl0) & 0x00001000) 393 errors |= 1; 394 395 if ((sysinfo->dsize == 0x2) && (readl(&mmdc1->mpdgctrl0) & 0x00001000)) 396 errors |= 2; 397 398 /* now disable mpdgctrl0[DG_CMP_CYC] */ 399 clrbits_le32(&mmdc0->mpdgctrl0, 1 << 30); 400 if (sysinfo->dsize == 2) 401 clrbits_le32(&mmdc1->mpdgctrl0, 1 << 30); 402 403 /* 404 * DQS gating absolute offset should be modified from 405 * reflecting (HW_DG_LOWx + HW_DG_UPx)/2 to 406 * reflecting (HW_DG_UPx - 0x80) 407 */ 408 modify_dg_result(&mmdc0->mpdghwst0, &mmdc0->mpdghwst1, 409 &mmdc0->mpdgctrl0); 410 modify_dg_result(&mmdc0->mpdghwst2, &mmdc0->mpdghwst3, 411 &mmdc0->mpdgctrl1); 412 if (sysinfo->dsize == 0x2) { 413 modify_dg_result(&mmdc1->mpdghwst0, &mmdc1->mpdghwst1, 414 &mmdc1->mpdgctrl0); 415 modify_dg_result(&mmdc1->mpdghwst2, &mmdc1->mpdghwst3, 416 &mmdc1->mpdgctrl1); 417 } 418 debug("Ending Read DQS Gating calibration. Error mask: 0x%x\n", errors); 419 420 /* 421 * ********************** 422 * Read Delay calibration 423 * ********************** 424 */ 425 debug("Starting Read Delay calibration.\n"); 426 427 reset_read_data_fifos(); 428 429 /* 430 * 4. Issue the Precharge-All command to the DDR device for both 431 * chip selects. If only using one chip select, then precharge 432 * only the desired chip select. 433 */ 434 precharge_all(cs0_enable, cs1_enable); 435 436 /* 437 * 9. Read delay-line calibration 438 * Start the automatic read calibration process by asserting 439 * MPRDDLHWCTL[HW_RD_DL_EN]. 440 */ 441 writel(0x00000030, &mmdc0->mprddlhwctl); 442 443 /* 444 * 10. poll for completion 445 * MMDC indicates that the write data calibration had finished by 446 * setting MPRDDLHWCTL[HW_RD_DL_EN] = 0. Also, ensure that 447 * no error bits were set. 448 */ 449 wait_for_bit_le32(&mmdc0->mprddlhwctl, 1 << 4, 0, 100, 0); 450 451 /* check both PHYs for x64 configuration, if x32, check only PHY0 */ 452 if (readl(&mmdc0->mprddlhwctl) & 0x0000000f) 453 errors |= 4; 454 455 if ((sysinfo->dsize == 0x2) && 456 (readl(&mmdc1->mprddlhwctl) & 0x0000000f)) 457 errors |= 8; 458 459 debug("Ending Read Delay calibration. Error mask: 0x%x\n", errors); 460 461 /* 462 * *********************** 463 * Write Delay Calibration 464 * *********************** 465 */ 466 debug("Starting Write Delay calibration.\n"); 467 468 reset_read_data_fifos(); 469 470 /* 471 * 4. Issue the Precharge-All command to the DDR device for both 472 * chip selects. If only using one chip select, then precharge 473 * only the desired chip select. 474 */ 475 precharge_all(cs0_enable, cs1_enable); 476 477 /* 478 * 8. Set the WR_DL_ABS# bits to their default values. 479 * Both PHYs for x64 configuration, if x32, do only PHY0. 480 */ 481 writel(initdelay, &mmdc0->mpwrdlctl); 482 if (sysinfo->dsize == 0x2) 483 writel(initdelay, &mmdc1->mpwrdlctl); 484 485 /* 486 * XXX This isn't in the manual. Force a measurement, 487 * for previous delay setup to effect. 488 */ 489 force_delay_measurement(sysinfo->dsize); 490 491 /* 492 * 9. 10. Start the automatic write calibration process 493 * by asserting MPWRDLHWCTL0[HW_WR_DL_EN]. 494 */ 495 writel(0x00000030, &mmdc0->mpwrdlhwctl); 496 497 /* 498 * Poll for completion. 499 * MMDC indicates that the write data calibration had finished 500 * by setting MPWRDLHWCTL[HW_WR_DL_EN] = 0. 501 * Also, ensure that no error bits were set. 502 */ 503 wait_for_bit_le32(&mmdc0->mpwrdlhwctl, 1 << 4, 0, 100, 0); 504 505 /* Check both PHYs for x64 configuration, if x32, check only PHY0 */ 506 if (readl(&mmdc0->mpwrdlhwctl) & 0x0000000f) 507 errors |= 16; 508 509 if ((sysinfo->dsize == 0x2) && 510 (readl(&mmdc1->mpwrdlhwctl) & 0x0000000f)) 511 errors |= 32; 512 513 debug("Ending Write Delay calibration. Error mask: 0x%x\n", errors); 514 515 reset_read_data_fifos(); 516 517 /* Enable DDR logic power down timer */ 518 setbits_le32(&mmdc0->mdpdc, 0x00005500); 519 520 /* Enable Adopt power down timer */ 521 clrbits_le32(&mmdc0->mapsr, 0x1); 522 523 /* Restore MDMISC value (RALAT, WALAT) to MMDCP1 */ 524 writel(esdmisc_val, &mmdc0->mdmisc); 525 526 /* Clear DQS pull ups */ 527 clrbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000); 528 clrbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000); 529 clrbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000); 530 clrbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000); 531 clrbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000); 532 clrbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000); 533 clrbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000); 534 clrbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000); 535 536 /* Re-enable SDE (chip selects) if they were set initially */ 537 if (cs1_enable_initial) 538 /* Set SDE_1 */ 539 setbits_le32(&mmdc0->mdctl, 1 << 30); 540 541 if (cs0_enable_initial) 542 /* Set SDE_0 */ 543 setbits_le32(&mmdc0->mdctl, 1 << 31); 544 545 /* Re-enable to auto refresh */ 546 writel(temp_ref, &mmdc0->mdref); 547 548 /* Clear the MDSCR (including the con_req bit) */ 549 writel(0x0, &mmdc0->mdscr); /* CS0 */ 550 551 /* Poll to make sure the con_ack bit is clear */ 552 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 0, 100, 0); 553 554 /* 555 * Print out the registers that were updated as a result 556 * of the calibration process. 557 */ 558 debug("MMDC registers updated from calibration\n"); 559 debug("Read DQS gating calibration:\n"); 560 debug("\tMPDGCTRL0 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl0)); 561 debug("\tMPDGCTRL1 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl1)); 562 if (sysinfo->dsize == 2) { 563 debug("\tMPDGCTRL0 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl0)); 564 debug("\tMPDGCTRL1 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl1)); 565 } 566 debug("Read calibration:\n"); 567 debug("\tMPRDDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mprddlctl)); 568 if (sysinfo->dsize == 2) 569 debug("\tMPRDDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mprddlctl)); 570 debug("Write calibration:\n"); 571 debug("\tMPWRDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mpwrdlctl)); 572 if (sysinfo->dsize == 2) 573 debug("\tMPWRDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mpwrdlctl)); 574 575 /* 576 * Registers below are for debugging purposes. These print out 577 * the upper and lower boundaries captured during 578 * read DQS gating calibration. 579 */ 580 debug("Status registers bounds for read DQS gating:\n"); 581 debug("\tMPDGHWST0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst0)); 582 debug("\tMPDGHWST1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst1)); 583 debug("\tMPDGHWST2 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst2)); 584 debug("\tMPDGHWST3 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst3)); 585 if (sysinfo->dsize == 2) { 586 debug("\tMPDGHWST0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst0)); 587 debug("\tMPDGHWST1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst1)); 588 debug("\tMPDGHWST2 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst2)); 589 debug("\tMPDGHWST3 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst3)); 590 } 591 592 debug("Final do_dqs_calibration error mask: 0x%x\n", errors); 593 594 return errors; 595 } 596 #endif 597 598 #if defined(CONFIG_MX6SX) 599 /* Configure MX6SX mmdc iomux */ 600 void mx6sx_dram_iocfg(unsigned width, 601 const struct mx6sx_iomux_ddr_regs *ddr, 602 const struct mx6sx_iomux_grp_regs *grp) 603 { 604 struct mx6sx_iomux_ddr_regs *mx6_ddr_iomux; 605 struct mx6sx_iomux_grp_regs *mx6_grp_iomux; 606 607 mx6_ddr_iomux = (struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE; 608 mx6_grp_iomux = (struct mx6sx_iomux_grp_regs *)MX6SX_IOM_GRP_BASE; 609 610 /* DDR IO TYPE */ 611 writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type); 612 writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke); 613 614 /* CLOCK */ 615 writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0); 616 617 /* ADDRESS */ 618 writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas); 619 writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras); 620 writel(grp->grp_addds, &mx6_grp_iomux->grp_addds); 621 622 /* Control */ 623 writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset); 624 writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2); 625 writel(ddr->dram_sdcke0, &mx6_ddr_iomux->dram_sdcke0); 626 writel(ddr->dram_sdcke1, &mx6_ddr_iomux->dram_sdcke1); 627 writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0); 628 writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1); 629 writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds); 630 631 /* Data Strobes */ 632 writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl); 633 writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0); 634 writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1); 635 if (width >= 32) { 636 writel(ddr->dram_sdqs2, &mx6_ddr_iomux->dram_sdqs2); 637 writel(ddr->dram_sdqs3, &mx6_ddr_iomux->dram_sdqs3); 638 } 639 640 /* Data */ 641 writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode); 642 writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds); 643 writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds); 644 if (width >= 32) { 645 writel(grp->grp_b2ds, &mx6_grp_iomux->grp_b2ds); 646 writel(grp->grp_b3ds, &mx6_grp_iomux->grp_b3ds); 647 } 648 writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0); 649 writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1); 650 if (width >= 32) { 651 writel(ddr->dram_dqm2, &mx6_ddr_iomux->dram_dqm2); 652 writel(ddr->dram_dqm3, &mx6_ddr_iomux->dram_dqm3); 653 } 654 } 655 #endif 656 657 #if defined(CONFIG_MX6UL) || defined(CONFIG_MX6ULL) 658 void mx6ul_dram_iocfg(unsigned width, 659 const struct mx6ul_iomux_ddr_regs *ddr, 660 const struct mx6ul_iomux_grp_regs *grp) 661 { 662 struct mx6ul_iomux_ddr_regs *mx6_ddr_iomux; 663 struct mx6ul_iomux_grp_regs *mx6_grp_iomux; 664 665 mx6_ddr_iomux = (struct mx6ul_iomux_ddr_regs *)MX6UL_IOM_DDR_BASE; 666 mx6_grp_iomux = (struct mx6ul_iomux_grp_regs *)MX6UL_IOM_GRP_BASE; 667 668 /* DDR IO TYPE */ 669 writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type); 670 writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke); 671 672 /* CLOCK */ 673 writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0); 674 675 /* ADDRESS */ 676 writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas); 677 writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras); 678 writel(grp->grp_addds, &mx6_grp_iomux->grp_addds); 679 680 /* Control */ 681 writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset); 682 writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2); 683 writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0); 684 writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1); 685 writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds); 686 687 /* Data Strobes */ 688 writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl); 689 writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0); 690 writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1); 691 692 /* Data */ 693 writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode); 694 writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds); 695 writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds); 696 writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0); 697 writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1); 698 } 699 #endif 700 701 #if defined(CONFIG_MX6SL) 702 void mx6sl_dram_iocfg(unsigned width, 703 const struct mx6sl_iomux_ddr_regs *ddr, 704 const struct mx6sl_iomux_grp_regs *grp) 705 { 706 struct mx6sl_iomux_ddr_regs *mx6_ddr_iomux; 707 struct mx6sl_iomux_grp_regs *mx6_grp_iomux; 708 709 mx6_ddr_iomux = (struct mx6sl_iomux_ddr_regs *)MX6SL_IOM_DDR_BASE; 710 mx6_grp_iomux = (struct mx6sl_iomux_grp_regs *)MX6SL_IOM_GRP_BASE; 711 712 /* DDR IO TYPE */ 713 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type; 714 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke; 715 716 /* CLOCK */ 717 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0; 718 719 /* ADDRESS */ 720 mx6_ddr_iomux->dram_cas = ddr->dram_cas; 721 mx6_ddr_iomux->dram_ras = ddr->dram_ras; 722 mx6_grp_iomux->grp_addds = grp->grp_addds; 723 724 /* Control */ 725 mx6_ddr_iomux->dram_reset = ddr->dram_reset; 726 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2; 727 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds; 728 729 /* Data Strobes */ 730 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl; 731 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0; 732 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1; 733 if (width >= 32) { 734 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2; 735 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3; 736 } 737 738 /* Data */ 739 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode; 740 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds; 741 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds; 742 if (width >= 32) { 743 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds; 744 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds; 745 } 746 747 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0; 748 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1; 749 if (width >= 32) { 750 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2; 751 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3; 752 } 753 } 754 #endif 755 756 #if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D) 757 /* Configure MX6DQ mmdc iomux */ 758 void mx6dq_dram_iocfg(unsigned width, 759 const struct mx6dq_iomux_ddr_regs *ddr, 760 const struct mx6dq_iomux_grp_regs *grp) 761 { 762 volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux; 763 volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux; 764 765 mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE; 766 mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE; 767 768 /* DDR IO Type */ 769 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type; 770 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke; 771 772 /* Clock */ 773 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0; 774 mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1; 775 776 /* Address */ 777 mx6_ddr_iomux->dram_cas = ddr->dram_cas; 778 mx6_ddr_iomux->dram_ras = ddr->dram_ras; 779 mx6_grp_iomux->grp_addds = grp->grp_addds; 780 781 /* Control */ 782 mx6_ddr_iomux->dram_reset = ddr->dram_reset; 783 mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0; 784 mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1; 785 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2; 786 mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0; 787 mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1; 788 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds; 789 790 /* Data Strobes */ 791 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl; 792 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0; 793 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1; 794 if (width >= 32) { 795 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2; 796 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3; 797 } 798 if (width >= 64) { 799 mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4; 800 mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5; 801 mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6; 802 mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7; 803 } 804 805 /* Data */ 806 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode; 807 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds; 808 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds; 809 if (width >= 32) { 810 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds; 811 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds; 812 } 813 if (width >= 64) { 814 mx6_grp_iomux->grp_b4ds = grp->grp_b4ds; 815 mx6_grp_iomux->grp_b5ds = grp->grp_b5ds; 816 mx6_grp_iomux->grp_b6ds = grp->grp_b6ds; 817 mx6_grp_iomux->grp_b7ds = grp->grp_b7ds; 818 } 819 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0; 820 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1; 821 if (width >= 32) { 822 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2; 823 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3; 824 } 825 if (width >= 64) { 826 mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4; 827 mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5; 828 mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6; 829 mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7; 830 } 831 } 832 #endif 833 834 #if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S) 835 /* Configure MX6SDL mmdc iomux */ 836 void mx6sdl_dram_iocfg(unsigned width, 837 const struct mx6sdl_iomux_ddr_regs *ddr, 838 const struct mx6sdl_iomux_grp_regs *grp) 839 { 840 volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux; 841 volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux; 842 843 mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE; 844 mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE; 845 846 /* DDR IO Type */ 847 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type; 848 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke; 849 850 /* Clock */ 851 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0; 852 mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1; 853 854 /* Address */ 855 mx6_ddr_iomux->dram_cas = ddr->dram_cas; 856 mx6_ddr_iomux->dram_ras = ddr->dram_ras; 857 mx6_grp_iomux->grp_addds = grp->grp_addds; 858 859 /* Control */ 860 mx6_ddr_iomux->dram_reset = ddr->dram_reset; 861 mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0; 862 mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1; 863 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2; 864 mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0; 865 mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1; 866 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds; 867 868 /* Data Strobes */ 869 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl; 870 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0; 871 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1; 872 if (width >= 32) { 873 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2; 874 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3; 875 } 876 if (width >= 64) { 877 mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4; 878 mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5; 879 mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6; 880 mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7; 881 } 882 883 /* Data */ 884 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode; 885 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds; 886 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds; 887 if (width >= 32) { 888 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds; 889 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds; 890 } 891 if (width >= 64) { 892 mx6_grp_iomux->grp_b4ds = grp->grp_b4ds; 893 mx6_grp_iomux->grp_b5ds = grp->grp_b5ds; 894 mx6_grp_iomux->grp_b6ds = grp->grp_b6ds; 895 mx6_grp_iomux->grp_b7ds = grp->grp_b7ds; 896 } 897 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0; 898 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1; 899 if (width >= 32) { 900 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2; 901 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3; 902 } 903 if (width >= 64) { 904 mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4; 905 mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5; 906 mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6; 907 mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7; 908 } 909 } 910 #endif 911 912 /* 913 * Configure mx6 mmdc registers based on: 914 * - board-specific memory configuration 915 * - board-specific calibration data 916 * - ddr3/lpddr2 chip details 917 * 918 * The various calculations here are derived from the Freescale 919 * 1. i.Mx6DQSDL DDR3 Script Aid spreadsheet (DOC-94917) designed to generate 920 * MMDC configuration registers based on memory system and memory chip 921 * parameters. 922 * 923 * 2. i.Mx6SL LPDDR2 Script Aid spreadsheet V0.04 designed to generate MMDC 924 * configuration registers based on memory system and memory chip 925 * parameters. 926 * 927 * The defaults here are those which were specified in the spreadsheet. 928 * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM 929 * and/or IMX6SLRM section titled MMDC initialization. 930 */ 931 #define MR(val, ba, cmd, cs1) \ 932 ((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba) 933 #define MMDC1(entry, value) do { \ 934 if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) \ 935 mmdc1->entry = value; \ 936 } while (0) 937 938 /* 939 * According JESD209-2B-LPDDR2: Table 103 940 * WL: write latency 941 */ 942 static int lpddr2_wl(uint32_t mem_speed) 943 { 944 switch (mem_speed) { 945 case 1066: 946 case 933: 947 return 4; 948 case 800: 949 return 3; 950 case 677: 951 case 533: 952 return 2; 953 case 400: 954 case 333: 955 return 1; 956 default: 957 puts("invalid memory speed\n"); 958 hang(); 959 } 960 961 return 0; 962 } 963 964 /* 965 * According JESD209-2B-LPDDR2: Table 103 966 * RL: read latency 967 */ 968 static int lpddr2_rl(uint32_t mem_speed) 969 { 970 switch (mem_speed) { 971 case 1066: 972 return 8; 973 case 933: 974 return 7; 975 case 800: 976 return 6; 977 case 677: 978 return 5; 979 case 533: 980 return 4; 981 case 400: 982 case 333: 983 return 3; 984 default: 985 puts("invalid memory speed\n"); 986 hang(); 987 } 988 989 return 0; 990 } 991 992 void mx6_lpddr2_cfg(const struct mx6_ddr_sysinfo *sysinfo, 993 const struct mx6_mmdc_calibration *calib, 994 const struct mx6_lpddr2_cfg *lpddr2_cfg) 995 { 996 volatile struct mmdc_p_regs *mmdc0; 997 u32 val; 998 u8 tcke, tcksrx, tcksre, trrd; 999 u8 twl, txp, tfaw, tcl; 1000 u16 tras, twr, tmrd, trtp, twtr, trfc, txsr; 1001 u16 trcd_lp, trppb_lp, trpab_lp, trc_lp; 1002 u16 cs0_end; 1003 u8 coladdr; 1004 int clkper; /* clock period in picoseconds */ 1005 int clock; /* clock freq in mHz */ 1006 int cs; 1007 1008 /* only support 16/32 bits */ 1009 if (sysinfo->dsize > 1) 1010 hang(); 1011 1012 mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 1013 1014 clock = mxc_get_clock(MXC_DDR_CLK) / 1000000U; 1015 clkper = (1000 * 1000) / clock; /* pico seconds */ 1016 1017 twl = lpddr2_wl(lpddr2_cfg->mem_speed) - 1; 1018 1019 /* LPDDR2-S2 and LPDDR2-S4 have the same tRFC value. */ 1020 switch (lpddr2_cfg->density) { 1021 case 1: 1022 case 2: 1023 case 4: 1024 trfc = DIV_ROUND_UP(130000, clkper) - 1; 1025 txsr = DIV_ROUND_UP(140000, clkper) - 1; 1026 break; 1027 case 8: 1028 trfc = DIV_ROUND_UP(210000, clkper) - 1; 1029 txsr = DIV_ROUND_UP(220000, clkper) - 1; 1030 break; 1031 default: 1032 /* 1033 * 64Mb, 128Mb, 256Mb, 512Mb are not supported currently. 1034 */ 1035 hang(); 1036 break; 1037 } 1038 /* 1039 * txpdll, txpr, taonpd and taofpd are not relevant in LPDDR2 mode, 1040 * set them to 0. */ 1041 txp = DIV_ROUND_UP(7500, clkper) - 1; 1042 tcke = 3; 1043 if (lpddr2_cfg->mem_speed == 333) 1044 tfaw = DIV_ROUND_UP(60000, clkper) - 1; 1045 else 1046 tfaw = DIV_ROUND_UP(50000, clkper) - 1; 1047 trrd = DIV_ROUND_UP(10000, clkper) - 1; 1048 1049 /* tckesr for LPDDR2 */ 1050 tcksre = DIV_ROUND_UP(15000, clkper); 1051 tcksrx = tcksre; 1052 twr = DIV_ROUND_UP(15000, clkper) - 1; 1053 /* 1054 * tMRR: 2, tMRW: 5 1055 * tMRD should be set to max(tMRR, tMRW) 1056 */ 1057 tmrd = 5; 1058 tras = DIV_ROUND_UP(lpddr2_cfg->trasmin, clkper / 10) - 1; 1059 /* LPDDR2 mode use tRCD_LP filed in MDCFG3. */ 1060 trcd_lp = DIV_ROUND_UP(lpddr2_cfg->trcd_lp, clkper / 10) - 1; 1061 trc_lp = DIV_ROUND_UP(lpddr2_cfg->trasmin + lpddr2_cfg->trppb_lp, 1062 clkper / 10) - 1; 1063 trppb_lp = DIV_ROUND_UP(lpddr2_cfg->trppb_lp, clkper / 10) - 1; 1064 trpab_lp = DIV_ROUND_UP(lpddr2_cfg->trpab_lp, clkper / 10) - 1; 1065 /* To LPDDR2, CL in MDCFG0 refers to RL */ 1066 tcl = lpddr2_rl(lpddr2_cfg->mem_speed) - 3; 1067 twtr = DIV_ROUND_UP(7500, clkper) - 1; 1068 trtp = DIV_ROUND_UP(7500, clkper) - 1; 1069 1070 cs0_end = 4 * sysinfo->cs_density - 1; 1071 1072 debug("density:%d Gb (%d Gb per chip)\n", 1073 sysinfo->cs_density, lpddr2_cfg->density); 1074 debug("clock: %dMHz (%d ps)\n", clock, clkper); 1075 debug("memspd:%d\n", lpddr2_cfg->mem_speed); 1076 debug("trcd_lp=%d\n", trcd_lp); 1077 debug("trppb_lp=%d\n", trppb_lp); 1078 debug("trpab_lp=%d\n", trpab_lp); 1079 debug("trc_lp=%d\n", trc_lp); 1080 debug("tcke=%d\n", tcke); 1081 debug("tcksrx=%d\n", tcksrx); 1082 debug("tcksre=%d\n", tcksre); 1083 debug("trfc=%d\n", trfc); 1084 debug("txsr=%d\n", txsr); 1085 debug("txp=%d\n", txp); 1086 debug("tfaw=%d\n", tfaw); 1087 debug("tcl=%d\n", tcl); 1088 debug("tras=%d\n", tras); 1089 debug("twr=%d\n", twr); 1090 debug("tmrd=%d\n", tmrd); 1091 debug("twl=%d\n", twl); 1092 debug("trtp=%d\n", trtp); 1093 debug("twtr=%d\n", twtr); 1094 debug("trrd=%d\n", trrd); 1095 debug("cs0_end=%d\n", cs0_end); 1096 debug("ncs=%d\n", sysinfo->ncs); 1097 1098 /* 1099 * board-specific configuration: 1100 * These values are determined empirically and vary per board layout 1101 */ 1102 mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0; 1103 mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1; 1104 mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0; 1105 mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1; 1106 mmdc0->mprddlctl = calib->p0_mprddlctl; 1107 mmdc0->mpwrdlctl = calib->p0_mpwrdlctl; 1108 mmdc0->mpzqlp2ctl = calib->mpzqlp2ctl; 1109 1110 /* Read data DQ Byte0-3 delay */ 1111 mmdc0->mprddqby0dl = 0x33333333; 1112 mmdc0->mprddqby1dl = 0x33333333; 1113 if (sysinfo->dsize > 0) { 1114 mmdc0->mprddqby2dl = 0x33333333; 1115 mmdc0->mprddqby3dl = 0x33333333; 1116 } 1117 1118 /* Write data DQ Byte0-3 delay */ 1119 mmdc0->mpwrdqby0dl = 0xf3333333; 1120 mmdc0->mpwrdqby1dl = 0xf3333333; 1121 if (sysinfo->dsize > 0) { 1122 mmdc0->mpwrdqby2dl = 0xf3333333; 1123 mmdc0->mpwrdqby3dl = 0xf3333333; 1124 } 1125 1126 /* 1127 * In LPDDR2 mode this register should be cleared, 1128 * so no termination will be activated. 1129 */ 1130 mmdc0->mpodtctrl = 0; 1131 1132 /* complete calibration */ 1133 val = (1 << 11); /* Force measurement on delay-lines */ 1134 mmdc0->mpmur0 = val; 1135 1136 /* Step 1: configuration request */ 1137 mmdc0->mdscr = (u32)(1 << 15); /* config request */ 1138 1139 /* Step 2: Timing configuration */ 1140 mmdc0->mdcfg0 = (trfc << 24) | (txsr << 16) | (txp << 13) | 1141 (tfaw << 4) | tcl; 1142 mmdc0->mdcfg1 = (tras << 16) | (twr << 9) | (tmrd << 5) | twl; 1143 mmdc0->mdcfg2 = (trtp << 6) | (twtr << 3) | trrd; 1144 mmdc0->mdcfg3lp = (trc_lp << 16) | (trcd_lp << 8) | 1145 (trppb_lp << 4) | trpab_lp; 1146 mmdc0->mdotc = 0; 1147 1148 mmdc0->mdasp = cs0_end; /* CS addressing */ 1149 1150 /* Step 3: Configure DDR type */ 1151 mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) | 1152 (sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) | 1153 (sysinfo->ralat << 6) | (1 << 3); 1154 1155 /* Step 4: Configure delay while leaving reset */ 1156 mmdc0->mdor = (sysinfo->sde_to_rst << 8) | 1157 (sysinfo->rst_to_cke << 0); 1158 1159 /* Step 5: Configure DDR physical parameters (density and burst len) */ 1160 coladdr = lpddr2_cfg->coladdr; 1161 if (lpddr2_cfg->coladdr == 8) /* 8-bit COL is 0x3 */ 1162 coladdr += 4; 1163 else if (lpddr2_cfg->coladdr == 12) /* 12-bit COL is 0x4 */ 1164 coladdr += 1; 1165 mmdc0->mdctl = (lpddr2_cfg->rowaddr - 11) << 24 | /* ROW */ 1166 (coladdr - 9) << 20 | /* COL */ 1167 (0 << 19) | /* Burst Length = 4 for LPDDR2 */ 1168 (sysinfo->dsize << 16); /* DDR data bus size */ 1169 1170 /* Step 6: Perform ZQ calibration */ 1171 val = 0xa1390003; /* one-time HW ZQ calib */ 1172 mmdc0->mpzqhwctrl = val; 1173 1174 /* Step 7: Enable MMDC with desired chip select */ 1175 mmdc0->mdctl |= (1 << 31) | /* SDE_0 for CS0 */ 1176 ((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */ 1177 1178 /* Step 8: Write Mode Registers to Init LPDDR2 devices */ 1179 for (cs = 0; cs < sysinfo->ncs; cs++) { 1180 /* MR63: reset */ 1181 mmdc0->mdscr = MR(63, 0, 3, cs); 1182 /* MR10: calibration, 1183 * 0xff is calibration command after intilization. 1184 */ 1185 val = 0xA | (0xff << 8); 1186 mmdc0->mdscr = MR(val, 0, 3, cs); 1187 /* MR1 */ 1188 val = 0x1 | (0x82 << 8); 1189 mmdc0->mdscr = MR(val, 0, 3, cs); 1190 /* MR2 */ 1191 val = 0x2 | (0x04 << 8); 1192 mmdc0->mdscr = MR(val, 0, 3, cs); 1193 /* MR3 */ 1194 val = 0x3 | (0x02 << 8); 1195 mmdc0->mdscr = MR(val, 0, 3, cs); 1196 } 1197 1198 /* Step 10: Power down control and self-refresh */ 1199 mmdc0->mdpdc = (tcke & 0x7) << 16 | 1200 5 << 12 | /* PWDT_1: 256 cycles */ 1201 5 << 8 | /* PWDT_0: 256 cycles */ 1202 1 << 6 | /* BOTH_CS_PD */ 1203 (tcksrx & 0x7) << 3 | 1204 (tcksre & 0x7); 1205 mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */ 1206 1207 /* Step 11: Configure ZQ calibration: one-time and periodic 1ms */ 1208 val = 0xa1310003; 1209 mmdc0->mpzqhwctrl = val; 1210 1211 /* Step 12: Configure and activate periodic refresh */ 1212 mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11); 1213 1214 /* Step 13: Deassert config request - init complete */ 1215 mmdc0->mdscr = 0x00000000; 1216 1217 /* wait for auto-ZQ calibration to complete */ 1218 mdelay(1); 1219 } 1220 1221 void mx6_ddr3_cfg(const struct mx6_ddr_sysinfo *sysinfo, 1222 const struct mx6_mmdc_calibration *calib, 1223 const struct mx6_ddr3_cfg *ddr3_cfg) 1224 { 1225 volatile struct mmdc_p_regs *mmdc0; 1226 volatile struct mmdc_p_regs *mmdc1; 1227 u32 val; 1228 u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd; 1229 u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl; 1230 u8 todt_idle_off = 0x4; /* from DDR3 Script Aid spreadsheet */ 1231 u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr; 1232 u16 cs0_end; 1233 u16 tdllk = 0x1ff; /* DLL locking time: 512 cycles (JEDEC DDR3) */ 1234 u8 coladdr; 1235 int clkper; /* clock period in picoseconds */ 1236 int clock; /* clock freq in MHz */ 1237 int cs; 1238 u16 mem_speed = ddr3_cfg->mem_speed; 1239 1240 mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 1241 if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) 1242 mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 1243 1244 /* Limit mem_speed for MX6D/MX6Q */ 1245 if (is_mx6dq() || is_mx6dqp()) { 1246 if (mem_speed > 1066) 1247 mem_speed = 1066; /* 1066 MT/s */ 1248 1249 tcwl = 4; 1250 } 1251 /* Limit mem_speed for MX6S/MX6DL */ 1252 else { 1253 if (mem_speed > 800) 1254 mem_speed = 800; /* 800 MT/s */ 1255 1256 tcwl = 3; 1257 } 1258 1259 clock = mem_speed / 2; 1260 /* 1261 * Data rate of 1066 MT/s requires 533 MHz DDR3 clock, but MX6D/Q supports 1262 * up to 528 MHz, so reduce the clock to fit chip specs 1263 */ 1264 if (is_mx6dq() || is_mx6dqp()) { 1265 if (clock > 528) 1266 clock = 528; /* 528 MHz */ 1267 } 1268 1269 clkper = (1000 * 1000) / clock; /* pico seconds */ 1270 todtlon = tcwl; 1271 taxpd = tcwl; 1272 tanpd = tcwl; 1273 1274 switch (ddr3_cfg->density) { 1275 case 1: /* 1Gb per chip */ 1276 trfc = DIV_ROUND_UP(110000, clkper) - 1; 1277 txs = DIV_ROUND_UP(120000, clkper) - 1; 1278 break; 1279 case 2: /* 2Gb per chip */ 1280 trfc = DIV_ROUND_UP(160000, clkper) - 1; 1281 txs = DIV_ROUND_UP(170000, clkper) - 1; 1282 break; 1283 case 4: /* 4Gb per chip */ 1284 trfc = DIV_ROUND_UP(260000, clkper) - 1; 1285 txs = DIV_ROUND_UP(270000, clkper) - 1; 1286 break; 1287 case 8: /* 8Gb per chip */ 1288 trfc = DIV_ROUND_UP(350000, clkper) - 1; 1289 txs = DIV_ROUND_UP(360000, clkper) - 1; 1290 break; 1291 default: 1292 /* invalid density */ 1293 puts("invalid chip density\n"); 1294 hang(); 1295 break; 1296 } 1297 txpr = txs; 1298 1299 switch (mem_speed) { 1300 case 800: 1301 txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1; 1302 tcke = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1; 1303 if (ddr3_cfg->pagesz == 1) { 1304 tfaw = DIV_ROUND_UP(40000, clkper) - 1; 1305 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1; 1306 } else { 1307 tfaw = DIV_ROUND_UP(50000, clkper) - 1; 1308 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1; 1309 } 1310 break; 1311 case 1066: 1312 txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1; 1313 tcke = DIV_ROUND_UP(max(3 * clkper, 5625), clkper) - 1; 1314 if (ddr3_cfg->pagesz == 1) { 1315 tfaw = DIV_ROUND_UP(37500, clkper) - 1; 1316 trrd = DIV_ROUND_UP(max(4 * clkper, 7500), clkper) - 1; 1317 } else { 1318 tfaw = DIV_ROUND_UP(50000, clkper) - 1; 1319 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1; 1320 } 1321 break; 1322 default: 1323 puts("invalid memory speed\n"); 1324 hang(); 1325 break; 1326 } 1327 txpdll = DIV_ROUND_UP(max(10 * clkper, 24000), clkper) - 1; 1328 tcksre = DIV_ROUND_UP(max(5 * clkper, 10000), clkper); 1329 taonpd = DIV_ROUND_UP(2000, clkper) - 1; 1330 tcksrx = tcksre; 1331 taofpd = taonpd; 1332 twr = DIV_ROUND_UP(15000, clkper) - 1; 1333 tmrd = DIV_ROUND_UP(max(12 * clkper, 15000), clkper) - 1; 1334 trc = DIV_ROUND_UP(ddr3_cfg->trcmin, clkper / 10) - 1; 1335 tras = DIV_ROUND_UP(ddr3_cfg->trasmin, clkper / 10) - 1; 1336 tcl = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 3; 1337 trp = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 1; 1338 twtr = ROUND(max(4 * clkper, 7500) / clkper, 1) - 1; 1339 trcd = trp; 1340 trtp = twtr; 1341 cs0_end = 4 * sysinfo->cs_density - 1; 1342 1343 debug("density:%d Gb (%d Gb per chip)\n", 1344 sysinfo->cs_density, ddr3_cfg->density); 1345 debug("clock: %dMHz (%d ps)\n", clock, clkper); 1346 debug("memspd:%d\n", mem_speed); 1347 debug("tcke=%d\n", tcke); 1348 debug("tcksrx=%d\n", tcksrx); 1349 debug("tcksre=%d\n", tcksre); 1350 debug("taofpd=%d\n", taofpd); 1351 debug("taonpd=%d\n", taonpd); 1352 debug("todtlon=%d\n", todtlon); 1353 debug("tanpd=%d\n", tanpd); 1354 debug("taxpd=%d\n", taxpd); 1355 debug("trfc=%d\n", trfc); 1356 debug("txs=%d\n", txs); 1357 debug("txp=%d\n", txp); 1358 debug("txpdll=%d\n", txpdll); 1359 debug("tfaw=%d\n", tfaw); 1360 debug("tcl=%d\n", tcl); 1361 debug("trcd=%d\n", trcd); 1362 debug("trp=%d\n", trp); 1363 debug("trc=%d\n", trc); 1364 debug("tras=%d\n", tras); 1365 debug("twr=%d\n", twr); 1366 debug("tmrd=%d\n", tmrd); 1367 debug("tcwl=%d\n", tcwl); 1368 debug("tdllk=%d\n", tdllk); 1369 debug("trtp=%d\n", trtp); 1370 debug("twtr=%d\n", twtr); 1371 debug("trrd=%d\n", trrd); 1372 debug("txpr=%d\n", txpr); 1373 debug("cs0_end=%d\n", cs0_end); 1374 debug("ncs=%d\n", sysinfo->ncs); 1375 debug("Rtt_wr=%d\n", sysinfo->rtt_wr); 1376 debug("Rtt_nom=%d\n", sysinfo->rtt_nom); 1377 debug("SRT=%d\n", ddr3_cfg->SRT); 1378 debug("twr=%d\n", twr); 1379 1380 /* 1381 * board-specific configuration: 1382 * These values are determined empirically and vary per board layout 1383 * see: 1384 * appnote, ddr3 spreadsheet 1385 */ 1386 mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0; 1387 mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1; 1388 mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0; 1389 mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1; 1390 mmdc0->mprddlctl = calib->p0_mprddlctl; 1391 mmdc0->mpwrdlctl = calib->p0_mpwrdlctl; 1392 if (sysinfo->dsize > 1) { 1393 MMDC1(mpwldectrl0, calib->p1_mpwldectrl0); 1394 MMDC1(mpwldectrl1, calib->p1_mpwldectrl1); 1395 MMDC1(mpdgctrl0, calib->p1_mpdgctrl0); 1396 MMDC1(mpdgctrl1, calib->p1_mpdgctrl1); 1397 MMDC1(mprddlctl, calib->p1_mprddlctl); 1398 MMDC1(mpwrdlctl, calib->p1_mpwrdlctl); 1399 } 1400 1401 /* Read data DQ Byte0-3 delay */ 1402 mmdc0->mprddqby0dl = 0x33333333; 1403 mmdc0->mprddqby1dl = 0x33333333; 1404 if (sysinfo->dsize > 0) { 1405 mmdc0->mprddqby2dl = 0x33333333; 1406 mmdc0->mprddqby3dl = 0x33333333; 1407 } 1408 1409 if (sysinfo->dsize > 1) { 1410 MMDC1(mprddqby0dl, 0x33333333); 1411 MMDC1(mprddqby1dl, 0x33333333); 1412 MMDC1(mprddqby2dl, 0x33333333); 1413 MMDC1(mprddqby3dl, 0x33333333); 1414 } 1415 1416 /* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */ 1417 val = (sysinfo->rtt_nom == 2) ? 0x00011117 : 0x00022227; 1418 mmdc0->mpodtctrl = val; 1419 if (sysinfo->dsize > 1) 1420 MMDC1(mpodtctrl, val); 1421 1422 /* complete calibration */ 1423 val = (1 << 11); /* Force measurement on delay-lines */ 1424 mmdc0->mpmur0 = val; 1425 if (sysinfo->dsize > 1) 1426 MMDC1(mpmur0, val); 1427 1428 /* Step 1: configuration request */ 1429 mmdc0->mdscr = (u32)(1 << 15); /* config request */ 1430 1431 /* Step 2: Timing configuration */ 1432 mmdc0->mdcfg0 = (trfc << 24) | (txs << 16) | (txp << 13) | 1433 (txpdll << 9) | (tfaw << 4) | tcl; 1434 mmdc0->mdcfg1 = (trcd << 29) | (trp << 26) | (trc << 21) | 1435 (tras << 16) | (1 << 15) /* trpa */ | 1436 (twr << 9) | (tmrd << 5) | tcwl; 1437 mmdc0->mdcfg2 = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd; 1438 mmdc0->mdotc = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) | 1439 (taxpd << 16) | (todtlon << 12) | (todt_idle_off << 4); 1440 mmdc0->mdasp = cs0_end; /* CS addressing */ 1441 1442 /* Step 3: Configure DDR type */ 1443 mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) | 1444 (sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) | 1445 (sysinfo->ralat << 6); 1446 1447 /* Step 4: Configure delay while leaving reset */ 1448 mmdc0->mdor = (txpr << 16) | (sysinfo->sde_to_rst << 8) | 1449 (sysinfo->rst_to_cke << 0); 1450 1451 /* Step 5: Configure DDR physical parameters (density and burst len) */ 1452 coladdr = ddr3_cfg->coladdr; 1453 if (ddr3_cfg->coladdr == 8) /* 8-bit COL is 0x3 */ 1454 coladdr += 4; 1455 else if (ddr3_cfg->coladdr == 12) /* 12-bit COL is 0x4 */ 1456 coladdr += 1; 1457 mmdc0->mdctl = (ddr3_cfg->rowaddr - 11) << 24 | /* ROW */ 1458 (coladdr - 9) << 20 | /* COL */ 1459 (1 << 19) | /* Burst Length = 8 for DDR3 */ 1460 (sysinfo->dsize << 16); /* DDR data bus size */ 1461 1462 /* Step 6: Perform ZQ calibration */ 1463 val = 0xa1390001; /* one-time HW ZQ calib */ 1464 mmdc0->mpzqhwctrl = val; 1465 if (sysinfo->dsize > 1) 1466 MMDC1(mpzqhwctrl, val); 1467 1468 /* Step 7: Enable MMDC with desired chip select */ 1469 mmdc0->mdctl |= (1 << 31) | /* SDE_0 for CS0 */ 1470 ((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */ 1471 1472 /* Step 8: Write Mode Registers to Init DDR3 devices */ 1473 for (cs = 0; cs < sysinfo->ncs; cs++) { 1474 /* MR2 */ 1475 val = (sysinfo->rtt_wr & 3) << 9 | (ddr3_cfg->SRT & 1) << 7 | 1476 ((tcwl - 3) & 3) << 3; 1477 debug("MR2 CS%d: 0x%08x\n", cs, (u32)MR(val, 2, 3, cs)); 1478 mmdc0->mdscr = MR(val, 2, 3, cs); 1479 /* MR3 */ 1480 debug("MR3 CS%d: 0x%08x\n", cs, (u32)MR(0, 3, 3, cs)); 1481 mmdc0->mdscr = MR(0, 3, 3, cs); 1482 /* MR1 */ 1483 val = ((sysinfo->rtt_nom & 1) ? 1 : 0) << 2 | 1484 ((sysinfo->rtt_nom & 2) ? 1 : 0) << 6; 1485 debug("MR1 CS%d: 0x%08x\n", cs, (u32)MR(val, 1, 3, cs)); 1486 mmdc0->mdscr = MR(val, 1, 3, cs); 1487 /* MR0 */ 1488 val = ((tcl - 1) << 4) | /* CAS */ 1489 (1 << 8) | /* DLL Reset */ 1490 ((twr - 3) << 9) | /* Write Recovery */ 1491 (sysinfo->pd_fast_exit << 12); /* Precharge PD PLL on */ 1492 debug("MR0 CS%d: 0x%08x\n", cs, (u32)MR(val, 0, 3, cs)); 1493 mmdc0->mdscr = MR(val, 0, 3, cs); 1494 /* ZQ calibration */ 1495 val = (1 << 10); 1496 mmdc0->mdscr = MR(val, 0, 4, cs); 1497 } 1498 1499 /* Step 10: Power down control and self-refresh */ 1500 mmdc0->mdpdc = (tcke & 0x7) << 16 | 1501 5 << 12 | /* PWDT_1: 256 cycles */ 1502 5 << 8 | /* PWDT_0: 256 cycles */ 1503 1 << 6 | /* BOTH_CS_PD */ 1504 (tcksrx & 0x7) << 3 | 1505 (tcksre & 0x7); 1506 if (!sysinfo->pd_fast_exit) 1507 mmdc0->mdpdc |= (1 << 7); /* SLOW_PD */ 1508 mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */ 1509 1510 /* Step 11: Configure ZQ calibration: one-time and periodic 1ms */ 1511 val = 0xa1390003; 1512 mmdc0->mpzqhwctrl = val; 1513 if (sysinfo->dsize > 1) 1514 MMDC1(mpzqhwctrl, val); 1515 1516 /* Step 12: Configure and activate periodic refresh */ 1517 mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11); 1518 1519 /* Step 13: Deassert config request - init complete */ 1520 mmdc0->mdscr = 0x00000000; 1521 1522 /* wait for auto-ZQ calibration to complete */ 1523 mdelay(1); 1524 } 1525 1526 void mmdc_read_calibration(struct mx6_ddr_sysinfo const *sysinfo, 1527 struct mx6_mmdc_calibration *calib) 1528 { 1529 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR; 1530 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR; 1531 1532 calib->p0_mpwldectrl0 = readl(&mmdc0->mpwldectrl0); 1533 calib->p0_mpwldectrl1 = readl(&mmdc0->mpwldectrl1); 1534 calib->p0_mpdgctrl0 = readl(&mmdc0->mpdgctrl0); 1535 calib->p0_mpdgctrl1 = readl(&mmdc0->mpdgctrl1); 1536 calib->p0_mprddlctl = readl(&mmdc0->mprddlctl); 1537 calib->p0_mpwrdlctl = readl(&mmdc0->mpwrdlctl); 1538 1539 if (sysinfo->dsize == 2) { 1540 calib->p1_mpwldectrl0 = readl(&mmdc1->mpwldectrl0); 1541 calib->p1_mpwldectrl1 = readl(&mmdc1->mpwldectrl1); 1542 calib->p1_mpdgctrl0 = readl(&mmdc1->mpdgctrl0); 1543 calib->p1_mpdgctrl1 = readl(&mmdc1->mpdgctrl1); 1544 calib->p1_mprddlctl = readl(&mmdc1->mprddlctl); 1545 calib->p1_mpwrdlctl = readl(&mmdc1->mpwrdlctl); 1546 } 1547 } 1548 1549 void mx6_dram_cfg(const struct mx6_ddr_sysinfo *sysinfo, 1550 const struct mx6_mmdc_calibration *calib, 1551 const void *ddr_cfg) 1552 { 1553 if (sysinfo->ddr_type == DDR_TYPE_DDR3) { 1554 mx6_ddr3_cfg(sysinfo, calib, ddr_cfg); 1555 } else if (sysinfo->ddr_type == DDR_TYPE_LPDDR2) { 1556 mx6_lpddr2_cfg(sysinfo, calib, ddr_cfg); 1557 } else { 1558 puts("Unsupported ddr type\n"); 1559 hang(); 1560 } 1561 } 1562