// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright Altera Corporation (C) 2012-2015
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)
		(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)
		(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

const struct socfpga_sdram_rw_mgr_config *rwcfg;
const struct socfpga_sdram_io_config *iocfg;
const struct socfpga_sdram_misc_config *misccfg;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic.
 * In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
static u16 dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */

static u16 skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)

static struct gbl_type *gbl;
static struct param_type *param;

static void set_failing_group_stage(u32 group, u32 stage,
				    u32 substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable; we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = rwcfg->mem_dq_per_read_dqs /
		rwcfg->mem_virtual_groups_per_read_dqs;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << rwcfg->mem_dq_per_read_dqs) - 1;
	param->write_correct_mask = (1 << rwcfg->mem_dq_per_write_dqs) - 1;
}
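
/*
 * Worked example for the mask setup above (illustrative only; the real
 * widths come from the board-specific rwcfg): with mem_dq_per_read_dqs = 8
 * and mem_virtual_groups_per_read_dqs = 1, ratio is 8, so both
 * read_correct_mask_vg and read_correct_mask are 0xff, i.e. one "must pass"
 * bit per DQ pin in the RW manager result register.
 */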

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (rwcfg->mem_number_of_ranks) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (rwcfg->mem_number_of_cs_per_dimm == 1) {
				/*
				 * - Dual-Slot , Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot , Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS.
16 (2^4) is the size of the 272 * full register file in the scc mgr: 273 * RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS + 274 * MEM_IF_READ_DQS_WIDTH - 1); 275 */ 276 int i; 277 278 for (i = 0; i < 16; i++) { 279 debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n", 280 __func__, __LINE__, i); 281 scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0); 282 } 283 } 284 285 static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase) 286 { 287 scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase); 288 } 289 290 static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay) 291 { 292 scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay); 293 } 294 295 static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase) 296 { 297 scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase); 298 } 299 300 static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay) 301 { 302 scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay); 303 } 304 305 static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay) 306 { 307 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay); 308 } 309 310 static void scc_mgr_set_dqs_io_in_delay(u32 delay) 311 { 312 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs, 313 delay); 314 } 315 316 static void scc_mgr_set_dm_in_delay(u32 dm, u32 delay) 317 { 318 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, 319 rwcfg->mem_dq_per_write_dqs + 1 + dm, 320 delay); 321 } 322 323 static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay) 324 { 325 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay); 326 } 327 328 static void scc_mgr_set_dqs_out1_delay(u32 delay) 329 { 330 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs, 331 delay); 332 } 333 334 static void scc_mgr_set_dm_out1_delay(u32 dm, u32 delay) 335 { 336 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, 337 rwcfg->mem_dq_per_write_dqs + 1 + dm, 338 delay); 339 } 340 341 /* load up dqs config settings */ 342 static void scc_mgr_load_dqs(u32 dqs) 343 { 344 writel(dqs, &sdr_scc_mgr->dqs_ena); 345 } 346 347 /* load up dqs io config settings */ 348 static void scc_mgr_load_dqs_io(void) 349 { 350 writel(0, &sdr_scc_mgr->dqs_io_ena); 351 } 352 353 /* load up dq config settings */ 354 static void scc_mgr_load_dq(u32 dq_in_group) 355 { 356 writel(dq_in_group, &sdr_scc_mgr->dq_ena); 357 } 358 359 /* load up dm config settings */ 360 static void scc_mgr_load_dm(u32 dm) 361 { 362 writel(dm, &sdr_scc_mgr->dm_ena); 363 } 364 365 /** 366 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks 367 * @off: Base offset in SCC Manager space 368 * @grp: Read/Write group 369 * @val: Value to be set 370 * @update: If non-zero, trigger SCC Manager update for all ranks 371 * 372 * This function sets the SCC Manager (Scan Chain Control Manager) register 373 * and optionally triggers the SCC update for all ranks. 
374 */ 375 static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val, 376 const int update) 377 { 378 u32 r; 379 380 for (r = 0; r < rwcfg->mem_number_of_ranks; 381 r += NUM_RANKS_PER_SHADOW_REG) { 382 scc_mgr_set(off, grp, val); 383 384 if (update || (r == 0)) { 385 writel(grp, &sdr_scc_mgr->dqs_ena); 386 writel(0, &sdr_scc_mgr->update); 387 } 388 } 389 } 390 391 static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase) 392 { 393 /* 394 * USER although the h/w doesn't support different phases per 395 * shadow register, for simplicity our scc manager modeling 396 * keeps different phase settings per shadow reg, and it's 397 * important for us to keep them in sync to match h/w. 398 * for efficiency, the scan chain update should occur only 399 * once to sr0. 400 */ 401 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET, 402 read_group, phase, 0); 403 } 404 405 static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group, 406 u32 phase) 407 { 408 /* 409 * USER although the h/w doesn't support different phases per 410 * shadow register, for simplicity our scc manager modeling 411 * keeps different phase settings per shadow reg, and it's 412 * important for us to keep them in sync to match h/w. 413 * for efficiency, the scan chain update should occur only 414 * once to sr0. 415 */ 416 scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, 417 write_group, phase, 0); 418 } 419 420 static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group, 421 u32 delay) 422 { 423 /* 424 * In shadow register mode, the T11 settings are stored in 425 * registers in the core, which are updated by the DQS_ENA 426 * signals. Not issuing the SCC_MGR_UPD command allows us to 427 * save lots of rank switching overhead, by calling 428 * select_shadow_regs_for_update with update_scan_chains 429 * set to 0. 430 */ 431 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET, 432 read_group, delay, 1); 433 } 434 435 /** 436 * scc_mgr_set_oct_out1_delay() - Set OCT output delay 437 * @write_group: Write group 438 * @delay: Delay value 439 * 440 * This function sets the OCT output delay in SCC manager. 441 */ 442 static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay) 443 { 444 const int ratio = rwcfg->mem_if_read_dqs_width / 445 rwcfg->mem_if_write_dqs_width; 446 const int base = write_group * ratio; 447 int i; 448 /* 449 * Load the setting in the SCC manager 450 * Although OCT affects only write data, the OCT delay is controlled 451 * by the DQS logic block which is instantiated once per read group. 452 * For protocols where a write group consists of multiple read groups, 453 * the setting must be set multiple times. 454 */ 455 for (i = 0; i < ratio; i++) 456 scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay); 457 } 458 459 /** 460 * scc_mgr_set_hhp_extras() - Set HHP extras. 461 * 462 * Load the fixed setting in the SCC manager HHP extras. 
463 */ 464 static void scc_mgr_set_hhp_extras(void) 465 { 466 /* 467 * Load the fixed setting in the SCC manager 468 * bits: 0:0 = 1'b1 - DQS bypass 469 * bits: 1:1 = 1'b1 - DQ bypass 470 * bits: 4:2 = 3'b001 - rfifo_mode 471 * bits: 6:5 = 2'b01 - rfifo clock_select 472 * bits: 7:7 = 1'b0 - separate gating from ungating setting 473 * bits: 8:8 = 1'b0 - separate OE from Output delay setting 474 */ 475 const u32 value = (0 << 8) | (0 << 7) | (1 << 5) | 476 (1 << 2) | (1 << 1) | (1 << 0); 477 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | 478 SCC_MGR_HHP_GLOBALS_OFFSET | 479 SCC_MGR_HHP_EXTRAS_OFFSET; 480 481 debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n", 482 __func__, __LINE__); 483 writel(value, addr); 484 debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n", 485 __func__, __LINE__); 486 } 487 488 /** 489 * scc_mgr_zero_all() - Zero all DQS config 490 * 491 * Zero all DQS config. 492 */ 493 static void scc_mgr_zero_all(void) 494 { 495 int i, r; 496 497 /* 498 * USER Zero all DQS config settings, across all groups and all 499 * shadow registers 500 */ 501 for (r = 0; r < rwcfg->mem_number_of_ranks; 502 r += NUM_RANKS_PER_SHADOW_REG) { 503 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { 504 /* 505 * The phases actually don't exist on a per-rank basis, 506 * but there's no harm updating them several times, so 507 * let's keep the code simple. 508 */ 509 scc_mgr_set_dqs_bus_in_delay(i, iocfg->dqs_in_reserve); 510 scc_mgr_set_dqs_en_phase(i, 0); 511 scc_mgr_set_dqs_en_delay(i, 0); 512 } 513 514 for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) { 515 scc_mgr_set_dqdqs_output_phase(i, 0); 516 /* Arria V/Cyclone V don't have out2. */ 517 scc_mgr_set_oct_out1_delay(i, iocfg->dqs_out_reserve); 518 } 519 } 520 521 /* Multicast to all DQS group enables. */ 522 writel(0xff, &sdr_scc_mgr->dqs_ena); 523 writel(0, &sdr_scc_mgr->update); 524 } 525 526 /** 527 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update 528 * @write_group: Write group 529 * 530 * Set bypass mode and trigger SCC update. 531 */ 532 static void scc_set_bypass_mode(const u32 write_group) 533 { 534 /* Multicast to all DQ enables. */ 535 writel(0xff, &sdr_scc_mgr->dq_ena); 536 writel(0xff, &sdr_scc_mgr->dm_ena); 537 538 /* Update current DQS IO enable. */ 539 writel(0, &sdr_scc_mgr->dqs_io_ena); 540 541 /* Update the DQS logic. */ 542 writel(write_group, &sdr_scc_mgr->dqs_ena); 543 544 /* Hit update. */ 545 writel(0, &sdr_scc_mgr->update); 546 } 547 548 /** 549 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group 550 * @write_group: Write group 551 * 552 * Load DQS settings for Write Group, do not trigger SCC update. 553 */ 554 static void scc_mgr_load_dqs_for_write_group(const u32 write_group) 555 { 556 const int ratio = rwcfg->mem_if_read_dqs_width / 557 rwcfg->mem_if_write_dqs_width; 558 const int base = write_group * ratio; 559 int i; 560 /* 561 * Load the setting in the SCC manager 562 * Although OCT affects only write data, the OCT delay is controlled 563 * by the DQS logic block which is instantiated once per read group. 564 * For protocols where a write group consists of multiple read groups, 565 * the setting must be set multiple times. 566 */ 567 for (i = 0; i < ratio; i++) 568 writel(base + i, &sdr_scc_mgr->dqs_ena); 569 } 570 571 /** 572 * scc_mgr_zero_group() - Zero all configs for a group 573 * 574 * Zero DQ, DM, DQS and OCT configs for a group. 
575 */ 576 static void scc_mgr_zero_group(const u32 write_group, const int out_only) 577 { 578 int i, r; 579 580 for (r = 0; r < rwcfg->mem_number_of_ranks; 581 r += NUM_RANKS_PER_SHADOW_REG) { 582 /* Zero all DQ config settings. */ 583 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) { 584 scc_mgr_set_dq_out1_delay(i, 0); 585 if (!out_only) 586 scc_mgr_set_dq_in_delay(i, 0); 587 } 588 589 /* Multicast to all DQ enables. */ 590 writel(0xff, &sdr_scc_mgr->dq_ena); 591 592 /* Zero all DM config settings. */ 593 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 594 if (!out_only) 595 scc_mgr_set_dm_in_delay(i, 0); 596 scc_mgr_set_dm_out1_delay(i, 0); 597 } 598 599 /* Multicast to all DM enables. */ 600 writel(0xff, &sdr_scc_mgr->dm_ena); 601 602 /* Zero all DQS IO settings. */ 603 if (!out_only) 604 scc_mgr_set_dqs_io_in_delay(0); 605 606 /* Arria V/Cyclone V don't have out2. */ 607 scc_mgr_set_dqs_out1_delay(iocfg->dqs_out_reserve); 608 scc_mgr_set_oct_out1_delay(write_group, iocfg->dqs_out_reserve); 609 scc_mgr_load_dqs_for_write_group(write_group); 610 611 /* Multicast to all DQS IO enables (only 1 in total). */ 612 writel(0, &sdr_scc_mgr->dqs_io_ena); 613 614 /* Hit update to zero everything. */ 615 writel(0, &sdr_scc_mgr->update); 616 } 617 } 618 619 /* 620 * apply and load a particular input delay for the DQ pins in a group 621 * group_bgn is the index of the first dq pin (in the write group) 622 */ 623 static void scc_mgr_apply_group_dq_in_delay(u32 group_bgn, u32 delay) 624 { 625 u32 i, p; 626 627 for (i = 0, p = group_bgn; i < rwcfg->mem_dq_per_read_dqs; i++, p++) { 628 scc_mgr_set_dq_in_delay(p, delay); 629 scc_mgr_load_dq(p); 630 } 631 } 632 633 /** 634 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group 635 * @delay: Delay value 636 * 637 * Apply and load a particular output delay for the DQ pins in a group. 638 */ 639 static void scc_mgr_apply_group_dq_out1_delay(const u32 delay) 640 { 641 int i; 642 643 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) { 644 scc_mgr_set_dq_out1_delay(i, delay); 645 scc_mgr_load_dq(i); 646 } 647 } 648 649 /* apply and load a particular output delay for the DM pins in a group */ 650 static void scc_mgr_apply_group_dm_out1_delay(u32 delay1) 651 { 652 u32 i; 653 654 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 655 scc_mgr_set_dm_out1_delay(i, delay1); 656 scc_mgr_load_dm(i); 657 } 658 } 659 660 661 /* apply and load delay on both DQS and OCT out1 */ 662 static void scc_mgr_apply_group_dqs_io_and_oct_out1(u32 write_group, 663 u32 delay) 664 { 665 scc_mgr_set_dqs_out1_delay(delay); 666 scc_mgr_load_dqs_io(); 667 668 scc_mgr_set_oct_out1_delay(write_group, delay); 669 scc_mgr_load_dqs_for_write_group(write_group); 670 } 671 672 /** 673 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT 674 * @write_group: Write group 675 * @delay: Delay value 676 * 677 * Apply a delay to the entire output side: DQ, DM, DQS, OCT. 
678 */ 679 static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group, 680 const u32 delay) 681 { 682 u32 i, new_delay; 683 684 /* DQ shift */ 685 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) 686 scc_mgr_load_dq(i); 687 688 /* DM shift */ 689 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) 690 scc_mgr_load_dm(i); 691 692 /* DQS shift */ 693 new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay; 694 if (new_delay > iocfg->io_out2_delay_max) { 695 debug_cond(DLEVEL >= 1, 696 "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n", 697 __func__, __LINE__, write_group, delay, new_delay, 698 iocfg->io_out2_delay_max, 699 new_delay - iocfg->io_out2_delay_max); 700 new_delay -= iocfg->io_out2_delay_max; 701 scc_mgr_set_dqs_out1_delay(new_delay); 702 } 703 704 scc_mgr_load_dqs_io(); 705 706 /* OCT shift */ 707 new_delay = READ_SCC_OCT_OUT2_DELAY + delay; 708 if (new_delay > iocfg->io_out2_delay_max) { 709 debug_cond(DLEVEL >= 1, 710 "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n", 711 __func__, __LINE__, write_group, delay, 712 new_delay, iocfg->io_out2_delay_max, 713 new_delay - iocfg->io_out2_delay_max); 714 new_delay -= iocfg->io_out2_delay_max; 715 scc_mgr_set_oct_out1_delay(write_group, new_delay); 716 } 717 718 scc_mgr_load_dqs_for_write_group(write_group); 719 } 720 721 /** 722 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side to all ranks 723 * @write_group: Write group 724 * @delay: Delay value 725 * 726 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks. 727 */ 728 static void 729 scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group, 730 const u32 delay) 731 { 732 int r; 733 734 for (r = 0; r < rwcfg->mem_number_of_ranks; 735 r += NUM_RANKS_PER_SHADOW_REG) { 736 scc_mgr_apply_group_all_out_delay_add(write_group, delay); 737 writel(0, &sdr_scc_mgr->update); 738 } 739 } 740 741 /** 742 * set_jump_as_return() - Return instruction optimization 743 * 744 * Optimization used to recover some slots in ddr3 inst_rom could be 745 * applied to other protocols if we wanted to 746 */ 747 static void set_jump_as_return(void) 748 { 749 /* 750 * To save space, we replace return with jump to special shared 751 * RETURN instruction so we set the counter to large value so that 752 * we always jump. 753 */ 754 writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0); 755 writel(rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 756 } 757 758 /** 759 * delay_for_n_mem_clocks() - Delay for N memory clocks 760 * @clocks: Length of the delay 761 * 762 * Delay for N memory clocks. 763 */ 764 static void delay_for_n_mem_clocks(const u32 clocks) 765 { 766 u32 afi_clocks; 767 u16 c_loop; 768 u8 inner; 769 u8 outer; 770 771 debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks); 772 773 /* Scale (rounding up) to get afi clocks. */ 774 afi_clocks = DIV_ROUND_UP(clocks, misccfg->afi_rate_ratio); 775 if (afi_clocks) /* Temporary underflow protection */ 776 afi_clocks--; 777 778 /* 779 * Note, we don't bother accounting for being off a little 780 * bit because of a few extra instructions in outer loops. 781 * Note, the loops have a test at the end, and do the test 782 * before the decrement, and so always perform the loop 783 * 1 time more than the counter value 784 */ 785 c_loop = afi_clocks >> 16; 786 outer = c_loop ? 0xff : (afi_clocks >> 8); 787 inner = outer ? 
			0xff : afi_clocks;

	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(rwcfg->idle_loop1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(rwcfg->idle_loop2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
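
/*
 * Worked example for the counter split above (illustrative only; the real
 * afi_rate_ratio comes from misccfg): with afi_rate_ratio = 1 and
 * clocks = 512, afi_clocks becomes 511 (0x1ff) after the decrement, so
 * c_loop = 0, outer = 0x1 and inner = 0xff. The nested-loop path then runs
 * roughly (inner + 1) * (outer + 1) = 256 * 2 = 512 AFI clocks, since each
 * loop executes one pass more than its counter value.
 */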

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(rwcfg->precharge_all, grpaddr);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((rwcfg->mem_address_mirroring >> r) & 0x1) {
			set_jump_as_return();
			writel(rwcfg->mrs2_mirr, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs3_mirr, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs1_mirr, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(rwcfg->mrs2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(rwcfg->zqcl, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load registers for a loop:
	 * Counters are located @ 0x800
	 * Jump addresses are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the numbers of iterations in the 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
	rw_mgr_mem_init_load_regs(misccfg->tinit_cntr0_val,
				  misccfg->tinit_cntr1_val,
				  misccfg->tinit_cntr2_val,
				  rwcfg->init_reset_0_cke_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the numbers of iterations in the 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
	rw_mgr_mem_init_load_regs(misccfg->treset_cntr0_val,
				  misccfg->treset_cntr1_val,
				  misccfg->treset_cntr2_val,
				  rwcfg->init_reset_1_cke_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(rwcfg->mrs0_dll_reset_mirr, rwcfg->mrs0_dll_reset,
			     0);
}
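
/*
 * Checking the tINIT arithmetic from rw_mgr_mem_initialize() above
 * (illustrative only; the actual counters come from misccfg): with n = 0
 * instructions in the inner loop, a = 256 (counter 0xFF) and b = 106
 * (counter 0x6A), ((2 + 0) * 256 + 2) * 106 = 54484 cycles, which covers
 * the ~54000 cycles needed for 200us at a 3.75ns clock period.
 */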

/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(rwcfg->mrs0_user_mirr, rwcfg->mrs0_user, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
 * @group:	Write Group
 * @test_dm:	Use DM
 *
 * Issue write test command. Two variants are provided, one that just tests
 * a write pattern and another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
						  u32 test_dm)
{
	const u32 quick_write_mode =
		(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
		misccfg->enable_super_quick_calibration;
	u32 mcc_instruction;
	u32 rw_wl_nop_cycles;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 * (2 and 3). One jump-counter (0) is used to perform multiple
	 * write-read operations, leaving one counter to issue this command
	 * in "multiple-group" mode.
	 */

	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
			writel(rwcfg->lfsr_wr_rd_dm_bank_0_data,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = rwcfg->lfsr_wr_rd_bank_0_wl_1;
			writel(rwcfg->lfsr_wr_rd_bank_0_data,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(rwcfg->lfsr_wr_rd_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
1092 */ 1093 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 1094 1095 /* CNTR 3 - Not used */ 1096 if (test_dm) { 1097 mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0; 1098 writel(rwcfg->lfsr_wr_rd_dm_bank_0_dqs, 1099 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1100 } else { 1101 mcc_instruction = rwcfg->lfsr_wr_rd_bank_0; 1102 writel(rwcfg->lfsr_wr_rd_bank_0_dqs, 1103 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1104 } 1105 } else { 1106 /* 1107 * CNTR 2 - In this case we want to execute the next instruction 1108 * and NOT take the jump. So we set the counter to 0. The jump 1109 * address doesn't count. 1110 */ 1111 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2); 1112 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1113 1114 /* 1115 * CNTR 3 - Set the nop counter to the number of cycles we 1116 * need to loop for, minus 1. 1117 */ 1118 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); 1119 if (test_dm) { 1120 mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0; 1121 writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop, 1122 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1123 } else { 1124 mcc_instruction = rwcfg->lfsr_wr_rd_bank_0; 1125 writel(rwcfg->lfsr_wr_rd_bank_0_nop, 1126 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1127 } 1128 } 1129 1130 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1131 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1132 1133 if (quick_write_mode) 1134 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0); 1135 else 1136 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0); 1137 1138 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1139 1140 /* 1141 * CNTR 1 - This is used to ensure enough time elapses 1142 * for read data to come back. 1143 */ 1144 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); 1145 1146 if (test_dm) { 1147 writel(rwcfg->lfsr_wr_rd_dm_bank_0_wait, 1148 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1149 } else { 1150 writel(rwcfg->lfsr_wr_rd_bank_0_wait, 1151 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1152 } 1153 1154 writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS | 1155 RW_MGR_RUN_SINGLE_GROUP_OFFSET) + 1156 (group << 2)); 1157 } 1158 1159 /** 1160 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass 1161 * @rank_bgn: Rank number 1162 * @write_group: Write Group 1163 * @use_dm: Use DM 1164 * @all_correct: All bits must be correct in the mask 1165 * @bit_chk: Resulting bit mask after the test 1166 * @all_ranks: Test all ranks 1167 * 1168 * Test writes, can check for a single bit pass or multiple bit pass. 1169 */ 1170 static int 1171 rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group, 1172 const u32 use_dm, const u32 all_correct, 1173 u32 *bit_chk, const u32 all_ranks) 1174 { 1175 const u32 rank_end = all_ranks ? 1176 rwcfg->mem_number_of_ranks : 1177 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1178 const u32 shift_ratio = rwcfg->mem_dq_per_write_dqs / 1179 rwcfg->mem_virtual_groups_per_write_dqs; 1180 const u32 correct_mask_vg = param->write_correct_mask_vg; 1181 1182 u32 tmp_bit_chk, base_rw_mgr; 1183 int vg, r; 1184 1185 *bit_chk = param->write_correct_mask; 1186 1187 for (r = rank_bgn; r < rank_end; r++) { 1188 /* Set rank */ 1189 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1190 1191 tmp_bit_chk = 0; 1192 for (vg = rwcfg->mem_virtual_groups_per_write_dqs - 1; 1193 vg >= 0; vg--) { 1194 /* Reset the FIFOs to get pointers to known state. 
*/ 1195 writel(0, &phy_mgr_cmd->fifo_reset); 1196 1197 rw_mgr_mem_calibrate_write_test_issue( 1198 write_group * 1199 rwcfg->mem_virtual_groups_per_write_dqs + vg, 1200 use_dm); 1201 1202 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1203 tmp_bit_chk <<= shift_ratio; 1204 tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr)); 1205 } 1206 1207 *bit_chk &= tmp_bit_chk; 1208 } 1209 1210 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1211 if (all_correct) { 1212 debug_cond(DLEVEL >= 2, 1213 "write_test(%u,%u,ALL) : %u == %u => %i\n", 1214 write_group, use_dm, *bit_chk, 1215 param->write_correct_mask, 1216 *bit_chk == param->write_correct_mask); 1217 return *bit_chk == param->write_correct_mask; 1218 } else { 1219 debug_cond(DLEVEL >= 2, 1220 "write_test(%u,%u,ONE) : %u != %i => %i\n", 1221 write_group, use_dm, *bit_chk, 0, *bit_chk != 0); 1222 return *bit_chk != 0x00; 1223 } 1224 } 1225 1226 /** 1227 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns 1228 * @rank_bgn: Rank number 1229 * @group: Read/Write Group 1230 * @all_ranks: Test all ranks 1231 * 1232 * Performs a guaranteed read on the patterns we are going to use during a 1233 * read test to ensure memory works. 1234 */ 1235 static int 1236 rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group, 1237 const u32 all_ranks) 1238 { 1239 const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | 1240 RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1241 const u32 addr_offset = 1242 (group * rwcfg->mem_virtual_groups_per_read_dqs) << 2; 1243 const u32 rank_end = all_ranks ? 1244 rwcfg->mem_number_of_ranks : 1245 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1246 const u32 shift_ratio = rwcfg->mem_dq_per_read_dqs / 1247 rwcfg->mem_virtual_groups_per_read_dqs; 1248 const u32 correct_mask_vg = param->read_correct_mask_vg; 1249 1250 u32 tmp_bit_chk, base_rw_mgr, bit_chk; 1251 int vg, r; 1252 int ret = 0; 1253 1254 bit_chk = param->read_correct_mask; 1255 1256 for (r = rank_bgn; r < rank_end; r++) { 1257 /* Set rank */ 1258 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1259 1260 /* Load up a constant bursts of read commands */ 1261 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); 1262 writel(rwcfg->guaranteed_read, 1263 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1264 1265 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); 1266 writel(rwcfg->guaranteed_read_cont, 1267 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1268 1269 tmp_bit_chk = 0; 1270 for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; 1271 vg >= 0; vg--) { 1272 /* Reset the FIFOs to get pointers to known state. 
*/ 1273 writel(0, &phy_mgr_cmd->fifo_reset); 1274 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1275 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1276 writel(rwcfg->guaranteed_read, 1277 addr + addr_offset + (vg << 2)); 1278 1279 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1280 tmp_bit_chk <<= shift_ratio; 1281 tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr; 1282 } 1283 1284 bit_chk &= tmp_bit_chk; 1285 } 1286 1287 writel(rwcfg->clear_dqs_enable, addr + (group << 2)); 1288 1289 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1290 1291 if (bit_chk != param->read_correct_mask) 1292 ret = -EIO; 1293 1294 debug_cond(DLEVEL >= 1, 1295 "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n", 1296 __func__, __LINE__, group, bit_chk, 1297 param->read_correct_mask, ret); 1298 1299 return ret; 1300 } 1301 1302 /** 1303 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test 1304 * @rank_bgn: Rank number 1305 * @all_ranks: Test all ranks 1306 * 1307 * Load up the patterns we are going to use during a read test. 1308 */ 1309 static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn, 1310 const int all_ranks) 1311 { 1312 const u32 rank_end = all_ranks ? 1313 rwcfg->mem_number_of_ranks : 1314 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1315 u32 r; 1316 1317 debug("%s:%d\n", __func__, __LINE__); 1318 1319 for (r = rank_bgn; r < rank_end; r++) { 1320 /* set rank */ 1321 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1322 1323 /* Load up a constant bursts */ 1324 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); 1325 1326 writel(rwcfg->guaranteed_write_wait0, 1327 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1328 1329 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); 1330 1331 writel(rwcfg->guaranteed_write_wait1, 1332 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1333 1334 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2); 1335 1336 writel(rwcfg->guaranteed_write_wait2, 1337 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1338 1339 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3); 1340 1341 writel(rwcfg->guaranteed_write_wait3, 1342 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1343 1344 writel(rwcfg->guaranteed_write, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1345 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 1346 } 1347 1348 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1349 } 1350 1351 /** 1352 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank 1353 * @rank_bgn: Rank number 1354 * @group: Read/Write group 1355 * @num_tries: Number of retries of the test 1356 * @all_correct: All bits must be correct in the mask 1357 * @bit_chk: Resulting bit mask after the test 1358 * @all_groups: Test all R/W groups 1359 * @all_ranks: Test all ranks 1360 * 1361 * Try a read and see if it returns correct data back. Test has dummy reads 1362 * inserted into the mix used to align DQS enable. Test has more thorough 1363 * checks than the regular read test. 1364 */ 1365 static int 1366 rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group, 1367 const u32 num_tries, const u32 all_correct, 1368 u32 *bit_chk, 1369 const u32 all_groups, const u32 all_ranks) 1370 { 1371 const u32 rank_end = all_ranks ? 
rwcfg->mem_number_of_ranks : 1372 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1373 const u32 quick_read_mode = 1374 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) && 1375 misccfg->enable_super_quick_calibration); 1376 u32 correct_mask_vg = param->read_correct_mask_vg; 1377 u32 tmp_bit_chk; 1378 u32 base_rw_mgr; 1379 u32 addr; 1380 1381 int r, vg, ret; 1382 1383 *bit_chk = param->read_correct_mask; 1384 1385 for (r = rank_bgn; r < rank_end; r++) { 1386 /* set rank */ 1387 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1388 1389 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1); 1390 1391 writel(rwcfg->read_b2b_wait1, 1392 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1393 1394 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2); 1395 writel(rwcfg->read_b2b_wait2, 1396 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1397 1398 if (quick_read_mode) 1399 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0); 1400 /* need at least two (1+1) reads to capture failures */ 1401 else if (all_groups) 1402 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0); 1403 else 1404 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0); 1405 1406 writel(rwcfg->read_b2b, 1407 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1408 if (all_groups) 1409 writel(rwcfg->mem_if_read_dqs_width * 1410 rwcfg->mem_virtual_groups_per_read_dqs - 1, 1411 &sdr_rw_load_mgr_regs->load_cntr3); 1412 else 1413 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3); 1414 1415 writel(rwcfg->read_b2b, 1416 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1417 1418 tmp_bit_chk = 0; 1419 for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0; 1420 vg--) { 1421 /* Reset the FIFOs to get pointers to known state. */ 1422 writel(0, &phy_mgr_cmd->fifo_reset); 1423 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1424 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1425 1426 if (all_groups) { 1427 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | 1428 RW_MGR_RUN_ALL_GROUPS_OFFSET; 1429 } else { 1430 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | 1431 RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1432 } 1433 1434 writel(rwcfg->read_b2b, addr + 1435 ((group * 1436 rwcfg->mem_virtual_groups_per_read_dqs + 1437 vg) << 2)); 1438 1439 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1440 tmp_bit_chk <<= rwcfg->mem_dq_per_read_dqs / 1441 rwcfg->mem_virtual_groups_per_read_dqs; 1442 tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr); 1443 } 1444 1445 *bit_chk &= tmp_bit_chk; 1446 } 1447 1448 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1449 writel(rwcfg->clear_dqs_enable, addr + (group << 2)); 1450 1451 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1452 1453 if (all_correct) { 1454 ret = (*bit_chk == param->read_correct_mask); 1455 debug_cond(DLEVEL >= 2, 1456 "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n", 1457 __func__, __LINE__, group, all_groups, *bit_chk, 1458 param->read_correct_mask, ret); 1459 } else { 1460 ret = (*bit_chk != 0x00); 1461 debug_cond(DLEVEL >= 2, 1462 "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n", 1463 __func__, __LINE__, group, all_groups, *bit_chk, 1464 0, ret); 1465 } 1466 1467 return ret; 1468 } 1469 1470 /** 1471 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks 1472 * @grp: Read/Write group 1473 * @num_tries: Number of retries of the test 1474 * @all_correct: All bits must be correct in the mask 1475 * @all_groups: Test all R/W groups 1476 * 1477 * Perform a READ test across all memory ranks. 
1478 */ 1479 static int 1480 rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries, 1481 const u32 all_correct, 1482 const u32 all_groups) 1483 { 1484 u32 bit_chk; 1485 return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct, 1486 &bit_chk, all_groups, 1); 1487 } 1488 1489 /** 1490 * rw_mgr_incr_vfifo() - Increase VFIFO value 1491 * @grp: Read/Write group 1492 * 1493 * Increase VFIFO value. 1494 */ 1495 static void rw_mgr_incr_vfifo(const u32 grp) 1496 { 1497 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy); 1498 } 1499 1500 /** 1501 * rw_mgr_decr_vfifo() - Decrease VFIFO value 1502 * @grp: Read/Write group 1503 * 1504 * Decrease VFIFO value. 1505 */ 1506 static void rw_mgr_decr_vfifo(const u32 grp) 1507 { 1508 u32 i; 1509 1510 for (i = 0; i < misccfg->read_valid_fifo_size - 1; i++) 1511 rw_mgr_incr_vfifo(grp); 1512 } 1513 1514 /** 1515 * find_vfifo_failing_read() - Push VFIFO to get a failing read 1516 * @grp: Read/Write group 1517 * 1518 * Push VFIFO until a failing read happens. 1519 */ 1520 static int find_vfifo_failing_read(const u32 grp) 1521 { 1522 u32 v, ret, fail_cnt = 0; 1523 1524 for (v = 0; v < misccfg->read_valid_fifo_size; v++) { 1525 debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n", 1526 __func__, __LINE__, v); 1527 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1528 PASS_ONE_BIT, 0); 1529 if (!ret) { 1530 fail_cnt++; 1531 1532 if (fail_cnt == 2) 1533 return v; 1534 } 1535 1536 /* Fiddle with FIFO. */ 1537 rw_mgr_incr_vfifo(grp); 1538 } 1539 1540 /* No failing read found! Something must have gone wrong. */ 1541 debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__); 1542 return 0; 1543 } 1544 1545 /** 1546 * sdr_find_phase_delay() - Find DQS enable phase or delay 1547 * @working: If 1, look for working phase/delay, if 0, look for non-working 1548 * @delay: If 1, look for delay, if 0, look for phase 1549 * @grp: Read/Write group 1550 * @work: Working window position 1551 * @work_inc: Working window increment 1552 * @pd: DQS Phase/Delay Iterator 1553 * 1554 * Find working or non-working DQS enable phase setting. 1555 */ 1556 static int sdr_find_phase_delay(int working, int delay, const u32 grp, 1557 u32 *work, const u32 work_inc, u32 *pd) 1558 { 1559 const u32 max = delay ? iocfg->dqs_en_delay_max : 1560 iocfg->dqs_en_phase_max; 1561 u32 ret; 1562 1563 for (; *pd <= max; (*pd)++) { 1564 if (delay) 1565 scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd); 1566 else 1567 scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd); 1568 1569 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1570 PASS_ONE_BIT, 0); 1571 if (!working) 1572 ret = !ret; 1573 1574 if (ret) 1575 return 0; 1576 1577 if (work) 1578 *work += work_inc; 1579 } 1580 1581 return -EINVAL; 1582 } 1583 /** 1584 * sdr_find_phase() - Find DQS enable phase 1585 * @working: If 1, look for working phase, if 0, look for non-working phase 1586 * @grp: Read/Write group 1587 * @work: Working window position 1588 * @i: Iterator 1589 * @p: DQS Phase Iterator 1590 * 1591 * Find working or non-working DQS enable phase setting. 1592 */ 1593 static int sdr_find_phase(int working, const u32 grp, u32 *work, 1594 u32 *i, u32 *p) 1595 { 1596 const u32 end = misccfg->read_valid_fifo_size + (working ? 0 : 1); 1597 int ret; 1598 1599 for (; *i < end; (*i)++) { 1600 if (working) 1601 *p = 0; 1602 1603 ret = sdr_find_phase_delay(working, 0, grp, work, 1604 iocfg->delay_per_opa_tap, p); 1605 if (!ret) 1606 return 0; 1607 1608 if (*p > iocfg->dqs_en_phase_max) { 1609 /* Fiddle with FIFO. 
*/ 1610 rw_mgr_incr_vfifo(grp); 1611 if (!working) 1612 *p = 0; 1613 } 1614 } 1615 1616 return -EINVAL; 1617 } 1618 1619 /** 1620 * sdr_working_phase() - Find working DQS enable phase 1621 * @grp: Read/Write group 1622 * @work_bgn: Working window start position 1623 * @d: dtaps output value 1624 * @p: DQS Phase Iterator 1625 * @i: Iterator 1626 * 1627 * Find working DQS enable phase setting. 1628 */ 1629 static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d, 1630 u32 *p, u32 *i) 1631 { 1632 const u32 dtaps_per_ptap = iocfg->delay_per_opa_tap / 1633 iocfg->delay_per_dqs_en_dchain_tap; 1634 int ret; 1635 1636 *work_bgn = 0; 1637 1638 for (*d = 0; *d <= dtaps_per_ptap; (*d)++) { 1639 *i = 0; 1640 scc_mgr_set_dqs_en_delay_all_ranks(grp, *d); 1641 ret = sdr_find_phase(1, grp, work_bgn, i, p); 1642 if (!ret) 1643 return 0; 1644 *work_bgn += iocfg->delay_per_dqs_en_dchain_tap; 1645 } 1646 1647 /* Cannot find working solution */ 1648 debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n", 1649 __func__, __LINE__); 1650 return -EINVAL; 1651 } 1652 1653 /** 1654 * sdr_backup_phase() - Find DQS enable backup phase 1655 * @grp: Read/Write group 1656 * @work_bgn: Working window start position 1657 * @p: DQS Phase Iterator 1658 * 1659 * Find DQS enable backup phase setting. 1660 */ 1661 static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p) 1662 { 1663 u32 tmp_delay, d; 1664 int ret; 1665 1666 /* Special case code for backing up a phase */ 1667 if (*p == 0) { 1668 *p = iocfg->dqs_en_phase_max; 1669 rw_mgr_decr_vfifo(grp); 1670 } else { 1671 (*p)--; 1672 } 1673 tmp_delay = *work_bgn - iocfg->delay_per_opa_tap; 1674 scc_mgr_set_dqs_en_phase_all_ranks(grp, *p); 1675 1676 for (d = 0; d <= iocfg->dqs_en_delay_max && tmp_delay < *work_bgn; 1677 d++) { 1678 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1679 1680 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1681 PASS_ONE_BIT, 0); 1682 if (ret) { 1683 *work_bgn = tmp_delay; 1684 break; 1685 } 1686 1687 tmp_delay += iocfg->delay_per_dqs_en_dchain_tap; 1688 } 1689 1690 /* Restore VFIFO to old state before we decremented it (if needed). */ 1691 (*p)++; 1692 if (*p > iocfg->dqs_en_phase_max) { 1693 *p = 0; 1694 rw_mgr_incr_vfifo(grp); 1695 } 1696 1697 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); 1698 } 1699 1700 /** 1701 * sdr_nonworking_phase() - Find non-working DQS enable phase 1702 * @grp: Read/Write group 1703 * @work_end: Working window end position 1704 * @p: DQS Phase Iterator 1705 * @i: Iterator 1706 * 1707 * Find non-working DQS enable phase setting. 1708 */ 1709 static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i) 1710 { 1711 int ret; 1712 1713 (*p)++; 1714 *work_end += iocfg->delay_per_opa_tap; 1715 if (*p > iocfg->dqs_en_phase_max) { 1716 /* Fiddle with FIFO. */ 1717 *p = 0; 1718 rw_mgr_incr_vfifo(grp); 1719 } 1720 1721 ret = sdr_find_phase(0, grp, work_end, i, p); 1722 if (ret) { 1723 /* Cannot see edge of failing read. */ 1724 debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n", 1725 __func__, __LINE__); 1726 } 1727 1728 return ret; 1729 } 1730 1731 /** 1732 * sdr_find_window_center() - Find center of the working DQS window. 1733 * @grp: Read/Write group 1734 * @work_bgn: First working settings 1735 * @work_end: Last working settings 1736 * 1737 * Find center of the working DQS enable window. 
1738 */ 1739 static int sdr_find_window_center(const u32 grp, const u32 work_bgn, 1740 const u32 work_end) 1741 { 1742 u32 work_mid; 1743 int tmp_delay = 0; 1744 int i, p, d; 1745 1746 work_mid = (work_bgn + work_end) / 2; 1747 1748 debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n", 1749 work_bgn, work_end, work_mid); 1750 /* Get the middle delay to be less than a VFIFO delay */ 1751 tmp_delay = (iocfg->dqs_en_phase_max + 1) * iocfg->delay_per_opa_tap; 1752 1753 debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay); 1754 work_mid %= tmp_delay; 1755 debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid); 1756 1757 tmp_delay = rounddown(work_mid, iocfg->delay_per_opa_tap); 1758 if (tmp_delay > iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap) 1759 tmp_delay = iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap; 1760 p = tmp_delay / iocfg->delay_per_opa_tap; 1761 1762 debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay); 1763 1764 d = DIV_ROUND_UP(work_mid - tmp_delay, 1765 iocfg->delay_per_dqs_en_dchain_tap); 1766 if (d > iocfg->dqs_en_delay_max) 1767 d = iocfg->dqs_en_delay_max; 1768 tmp_delay += d * iocfg->delay_per_dqs_en_dchain_tap; 1769 1770 debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay); 1771 1772 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1773 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1774 1775 /* 1776 * push vfifo until we can successfully calibrate. We can do this 1777 * because the largest possible margin in 1 VFIFO cycle. 1778 */ 1779 for (i = 0; i < misccfg->read_valid_fifo_size; i++) { 1780 debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n"); 1781 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1782 PASS_ONE_BIT, 1783 0)) { 1784 debug_cond(DLEVEL >= 2, 1785 "%s:%d center: found: ptap=%u dtap=%u\n", 1786 __func__, __LINE__, p, d); 1787 return 0; 1788 } 1789 1790 /* Fiddle with FIFO. */ 1791 rw_mgr_incr_vfifo(grp); 1792 } 1793 1794 debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n", 1795 __func__, __LINE__); 1796 return -EINVAL; 1797 } 1798 1799 /** 1800 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use 1801 * @grp: Read/Write Group 1802 * 1803 * Find a good DQS enable to use. 1804 */ 1805 static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) 1806 { 1807 u32 d, p, i; 1808 u32 dtaps_per_ptap; 1809 u32 work_bgn, work_end; 1810 u32 found_passing_read, found_failing_read = 0, initial_failing_dtap; 1811 int ret; 1812 1813 debug("%s:%d %u\n", __func__, __LINE__, grp); 1814 1815 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 1816 1817 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); 1818 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); 1819 1820 /* Step 0: Determine number of delay taps for each phase tap. */ 1821 dtaps_per_ptap = iocfg->delay_per_opa_tap / 1822 iocfg->delay_per_dqs_en_dchain_tap; 1823 1824 /* Step 1: First push vfifo until we get a failing read. */ 1825 find_vfifo_failing_read(grp); 1826 1827 /* Step 2: Find first working phase, increment in ptaps. */ 1828 work_bgn = 0; 1829 ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i); 1830 if (ret) 1831 return ret; 1832 1833 work_end = work_bgn; 1834 1835 /* 1836 * If d is 0 then the working window covers a phase tap and we can 1837 * follow the old procedure. Otherwise, we've found the beginning 1838 * and we need to increment the dtaps until we find the end. 1839 */ 1840 if (d == 0) { 1841 /* 1842 * Step 3a: If we have room, back off by one and 1843 * increment in dtaps. 
1844 */ 1845 sdr_backup_phase(grp, &work_bgn, &p); 1846 1847 /* 1848 * Step 4a: go forward from working phase to non working 1849 * phase, increment in ptaps. 1850 */ 1851 ret = sdr_nonworking_phase(grp, &work_end, &p, &i); 1852 if (ret) 1853 return ret; 1854 1855 /* Step 5a: Back off one from last, increment in dtaps. */ 1856 1857 /* Special case code for backing up a phase */ 1858 if (p == 0) { 1859 p = iocfg->dqs_en_phase_max; 1860 rw_mgr_decr_vfifo(grp); 1861 } else { 1862 p = p - 1; 1863 } 1864 1865 work_end -= iocfg->delay_per_opa_tap; 1866 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1867 1868 d = 0; 1869 1870 debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n", 1871 __func__, __LINE__, p); 1872 } 1873 1874 /* The dtap increment to find the failing edge is done here. */ 1875 sdr_find_phase_delay(0, 1, grp, &work_end, 1876 iocfg->delay_per_dqs_en_dchain_tap, &d); 1877 1878 /* Go back to working dtap */ 1879 if (d != 0) 1880 work_end -= iocfg->delay_per_dqs_en_dchain_tap; 1881 1882 debug_cond(DLEVEL >= 2, 1883 "%s:%d p/d: ptap=%u dtap=%u end=%u\n", 1884 __func__, __LINE__, p, d - 1, work_end); 1885 1886 if (work_end < work_bgn) { 1887 /* nil range */ 1888 debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n", 1889 __func__, __LINE__); 1890 return -EINVAL; 1891 } 1892 1893 debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n", 1894 __func__, __LINE__, work_bgn, work_end); 1895 1896 /* 1897 * We need to calculate the number of dtaps that equal a ptap. 1898 * To do that we'll back up a ptap and re-find the edge of the 1899 * window using dtaps 1900 */ 1901 debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n", 1902 __func__, __LINE__); 1903 1904 /* Special case code for backing up a phase */ 1905 if (p == 0) { 1906 p = iocfg->dqs_en_phase_max; 1907 rw_mgr_decr_vfifo(grp); 1908 debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n", 1909 __func__, __LINE__, p); 1910 } else { 1911 p = p - 1; 1912 debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u", 1913 __func__, __LINE__, p); 1914 } 1915 1916 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1917 1918 /* 1919 * Increase dtap until we first see a passing read (in case the 1920 * window is smaller than a ptap), and then a failing read to 1921 * mark the edge of the window again. 1922 */ 1923 1924 /* Find a passing read. */ 1925 debug_cond(DLEVEL >= 2, "%s:%d find passing read\n", 1926 __func__, __LINE__); 1927 1928 initial_failing_dtap = d; 1929 1930 found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d); 1931 if (found_passing_read) { 1932 /* Find a failing read. */ 1933 debug_cond(DLEVEL >= 2, "%s:%d find failing read\n", 1934 __func__, __LINE__); 1935 d++; 1936 found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0, 1937 &d); 1938 } else { 1939 debug_cond(DLEVEL >= 1, 1940 "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n", 1941 __func__, __LINE__); 1942 } 1943 1944 /* 1945 * The dynamically calculated dtaps_per_ptap is only valid if we 1946 * found a passing/failing read. If we didn't, it means d hit the max 1947 * (iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its 1948 * statically calculated value. 1949 */ 1950 if (found_passing_read && found_failing_read) 1951 dtaps_per_ptap = d - initial_failing_dtap; 1952 1953 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap); 1954 debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u", 1955 __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap); 1956 1957 /* Step 6: Find the centre of the window. 
*/ 1958 ret = sdr_find_window_center(grp, work_bgn, work_end); 1959 1960 return ret; 1961 } 1962 1963 /** 1964 * search_stop_check() - Check if the detected edge is valid 1965 * @write: Perform read (Stage 2) or write (Stage 3) calibration 1966 * @d: DQS delay 1967 * @rank_bgn: Rank number 1968 * @write_group: Write Group 1969 * @read_group: Read Group 1970 * @bit_chk: Resulting bit mask after the test 1971 * @sticky_bit_chk: Resulting sticky bit mask after the test 1972 * @use_read_test: Perform read test 1973 * 1974 * Test if the found edge is valid. 1975 */ 1976 static u32 search_stop_check(const int write, const int d, const int rank_bgn, 1977 const u32 write_group, const u32 read_group, 1978 u32 *bit_chk, u32 *sticky_bit_chk, 1979 const u32 use_read_test) 1980 { 1981 const u32 ratio = rwcfg->mem_if_read_dqs_width / 1982 rwcfg->mem_if_write_dqs_width; 1983 const u32 correct_mask = write ? param->write_correct_mask : 1984 param->read_correct_mask; 1985 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : 1986 rwcfg->mem_dq_per_read_dqs; 1987 u32 ret; 1988 /* 1989 * Stop searching when the read test doesn't pass AND when 1990 * we've seen a passing read on every bit. 1991 */ 1992 if (write) { /* WRITE-ONLY */ 1993 ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1994 0, PASS_ONE_BIT, 1995 bit_chk, 0); 1996 } else if (use_read_test) { /* READ-ONLY */ 1997 ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group, 1998 NUM_READ_PB_TESTS, 1999 PASS_ONE_BIT, bit_chk, 2000 0, 0); 2001 } else { /* READ-ONLY */ 2002 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, 2003 PASS_ONE_BIT, bit_chk, 0); 2004 *bit_chk = *bit_chk >> (per_dqs * 2005 (read_group - (write_group * ratio))); 2006 ret = (*bit_chk == 0); 2007 } 2008 *sticky_bit_chk = *sticky_bit_chk | *bit_chk; 2009 ret = ret && (*sticky_bit_chk == correct_mask); 2010 debug_cond(DLEVEL >= 2, 2011 "%s:%d center(left): dtap=%u => %u == %u && %u", 2012 __func__, __LINE__, d, 2013 *sticky_bit_chk, correct_mask, ret); 2014 return ret; 2015 } 2016 2017 /** 2018 * search_left_edge() - Find left edge of DQ/DQS working phase 2019 * @write: Perform read (Stage 2) or write (Stage 3) calibration 2020 * @rank_bgn: Rank number 2021 * @write_group: Write Group 2022 * @read_group: Read Group 2023 * @test_bgn: Rank number to begin the test 2024 * @sticky_bit_chk: Resulting sticky bit mask after the test 2025 * @left_edge: Left edge of the DQ/DQS phase 2026 * @right_edge: Right edge of the DQ/DQS phase 2027 * @use_read_test: Perform read test 2028 * 2029 * Find left edge of DQ/DQS working phase. 2030 */ 2031 static void search_left_edge(const int write, const int rank_bgn, 2032 const u32 write_group, const u32 read_group, const u32 test_bgn, 2033 u32 *sticky_bit_chk, 2034 int *left_edge, int *right_edge, const u32 use_read_test) 2035 { 2036 const u32 delay_max = write ? iocfg->io_out1_delay_max : 2037 iocfg->io_in_delay_max; 2038 const u32 dqs_max = write ? iocfg->io_out1_delay_max : 2039 iocfg->dqs_in_delay_max; 2040 const u32 per_dqs = write ? 
rwcfg->mem_dq_per_write_dqs : 2041 rwcfg->mem_dq_per_read_dqs; 2042 u32 stop, bit_chk; 2043 int i, d; 2044 2045 for (d = 0; d <= dqs_max; d++) { 2046 if (write) 2047 scc_mgr_apply_group_dq_out1_delay(d); 2048 else 2049 scc_mgr_apply_group_dq_in_delay(test_bgn, d); 2050 2051 writel(0, &sdr_scc_mgr->update); 2052 2053 stop = search_stop_check(write, d, rank_bgn, write_group, 2054 read_group, &bit_chk, sticky_bit_chk, 2055 use_read_test); 2056 if (stop == 1) 2057 break; 2058 2059 /* stop != 1 */ 2060 for (i = 0; i < per_dqs; i++) { 2061 if (bit_chk & 1) { 2062 /* 2063 * Remember a passing test as 2064 * the left_edge. 2065 */ 2066 left_edge[i] = d; 2067 } else { 2068 /* 2069 * If a left edge has not been seen 2070 * yet, then a future passing test 2071 * will mark this edge as the right 2072 * edge. 2073 */ 2074 if (left_edge[i] == delay_max + 1) 2075 right_edge[i] = -(d + 1); 2076 } 2077 bit_chk >>= 1; 2078 } 2079 } 2080 2081 /* Reset DQ delay chains to 0 */ 2082 if (write) 2083 scc_mgr_apply_group_dq_out1_delay(0); 2084 else 2085 scc_mgr_apply_group_dq_in_delay(test_bgn, 0); 2086 2087 *sticky_bit_chk = 0; 2088 for (i = per_dqs - 1; i >= 0; i--) { 2089 debug_cond(DLEVEL >= 2, 2090 "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n", 2091 __func__, __LINE__, i, left_edge[i], 2092 i, right_edge[i]); 2093 2094 /* 2095 * Check for cases where we haven't found the left edge, 2096 * which makes our assignment of the the right edge invalid. 2097 * Reset it to the illegal value. 2098 */ 2099 if ((left_edge[i] == delay_max + 1) && 2100 (right_edge[i] != delay_max + 1)) { 2101 right_edge[i] = delay_max + 1; 2102 debug_cond(DLEVEL >= 2, 2103 "%s:%d vfifo_center: reset right_edge[%u]: %d\n", 2104 __func__, __LINE__, i, right_edge[i]); 2105 } 2106 2107 /* 2108 * Reset sticky bit 2109 * READ: except for bits where we have seen both 2110 * the left and right edge. 2111 * WRITE: except for bits where we have seen the 2112 * left edge. 2113 */ 2114 *sticky_bit_chk <<= 1; 2115 if (write) { 2116 if (left_edge[i] != delay_max + 1) 2117 *sticky_bit_chk |= 1; 2118 } else { 2119 if ((left_edge[i] != delay_max + 1) && 2120 (right_edge[i] != delay_max + 1)) 2121 *sticky_bit_chk |= 1; 2122 } 2123 } 2124 } 2125 2126 /** 2127 * search_right_edge() - Find right edge of DQ/DQS working phase 2128 * @write: Perform read (Stage 2) or write (Stage 3) calibration 2129 * @rank_bgn: Rank number 2130 * @write_group: Write Group 2131 * @read_group: Read Group 2132 * @start_dqs: DQS start phase 2133 * @start_dqs_en: DQS enable start phase 2134 * @sticky_bit_chk: Resulting sticky bit mask after the test 2135 * @left_edge: Left edge of the DQ/DQS phase 2136 * @right_edge: Right edge of the DQ/DQS phase 2137 * @use_read_test: Perform read test 2138 * 2139 * Find right edge of DQ/DQS working phase. 2140 */ 2141 static int search_right_edge(const int write, const int rank_bgn, 2142 const u32 write_group, const u32 read_group, 2143 const int start_dqs, const int start_dqs_en, 2144 u32 *sticky_bit_chk, 2145 int *left_edge, int *right_edge, const u32 use_read_test) 2146 { 2147 const u32 delay_max = write ? iocfg->io_out1_delay_max : 2148 iocfg->io_in_delay_max; 2149 const u32 dqs_max = write ? iocfg->io_out1_delay_max : 2150 iocfg->dqs_in_delay_max; 2151 const u32 per_dqs = write ? 
rwcfg->mem_dq_per_write_dqs : 2152 rwcfg->mem_dq_per_read_dqs; 2153 u32 stop, bit_chk; 2154 int i, d; 2155 2156 for (d = 0; d <= dqs_max - start_dqs; d++) { 2157 if (write) { /* WRITE-ONLY */ 2158 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 2159 d + start_dqs); 2160 } else { /* READ-ONLY */ 2161 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); 2162 if (iocfg->shift_dqs_en_when_shift_dqs) { 2163 u32 delay = d + start_dqs_en; 2164 if (delay > iocfg->dqs_en_delay_max) 2165 delay = iocfg->dqs_en_delay_max; 2166 scc_mgr_set_dqs_en_delay(read_group, delay); 2167 } 2168 scc_mgr_load_dqs(read_group); 2169 } 2170 2171 writel(0, &sdr_scc_mgr->update); 2172 2173 stop = search_stop_check(write, d, rank_bgn, write_group, 2174 read_group, &bit_chk, sticky_bit_chk, 2175 use_read_test); 2176 if (stop == 1) { 2177 if (write && (d == 0)) { /* WRITE-ONLY */ 2178 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; 2179 i++) { 2180 /* 2181 * d = 0 failed, but it passed when 2182 * testing the left edge, so it must be 2183 * marginal, set it to -1 2184 */ 2185 if (right_edge[i] == delay_max + 1 && 2186 left_edge[i] != delay_max + 1) 2187 right_edge[i] = -1; 2188 } 2189 } 2190 break; 2191 } 2192 2193 /* stop != 1 */ 2194 for (i = 0; i < per_dqs; i++) { 2195 if (bit_chk & 1) { 2196 /* 2197 * Remember a passing test as 2198 * the right_edge. 2199 */ 2200 right_edge[i] = d; 2201 } else { 2202 if (d != 0) { 2203 /* 2204 * If a right edge has not 2205 * been seen yet, then a future 2206 * passing test will mark this 2207 * edge as the left edge. 2208 */ 2209 if (right_edge[i] == delay_max + 1) 2210 left_edge[i] = -(d + 1); 2211 } else { 2212 /* 2213 * d = 0 failed, but it passed 2214 * when testing the left edge, 2215 * so it must be marginal, set 2216 * it to -1 2217 */ 2218 if (right_edge[i] == delay_max + 1 && 2219 left_edge[i] != delay_max + 1) 2220 right_edge[i] = -1; 2221 /* 2222 * If a right edge has not been 2223 * seen yet, then a future 2224 * passing test will mark this 2225 * edge as the left edge. 2226 */ 2227 else if (right_edge[i] == delay_max + 1) 2228 left_edge[i] = -(d + 1); 2229 } 2230 } 2231 2232 debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ", 2233 __func__, __LINE__, d); 2234 debug_cond(DLEVEL >= 2, 2235 "bit_chk_test=%i left_edge[%u]: %d ", 2236 bit_chk & 1, i, left_edge[i]); 2237 debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i, 2238 right_edge[i]); 2239 bit_chk >>= 1; 2240 } 2241 } 2242 2243 /* Check that all bits have a window */ 2244 for (i = 0; i < per_dqs; i++) { 2245 debug_cond(DLEVEL >= 2, 2246 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d", 2247 __func__, __LINE__, i, left_edge[i], 2248 i, right_edge[i]); 2249 if ((left_edge[i] == dqs_max + 1) || 2250 (right_edge[i] == dqs_max + 1)) 2251 return i + 1; /* FIXME: If we fail, retval > 0 */ 2252 } 2253 2254 return 0; 2255 } 2256 2257 /** 2258 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase 2259 * @write: Perform read (Stage 2) or write (Stage 3) calibration 2260 * @left_edge: Left edge of the DQ/DQS phase 2261 * @right_edge: Right edge of the DQ/DQS phase 2262 * @mid_min: Best DQ/DQS phase middle setting 2263 * 2264 * Find index and value of the middle of the DQ/DQS working phase. 2265 */ 2266 static int get_window_mid_index(const int write, int *left_edge, 2267 int *right_edge, int *mid_min) 2268 { 2269 const u32 per_dqs = write ? 
rwcfg->mem_dq_per_write_dqs : 2270 rwcfg->mem_dq_per_read_dqs; 2271 int i, mid, min_index; 2272 2273 /* Find middle of window for each DQ bit */ 2274 *mid_min = left_edge[0] - right_edge[0]; 2275 min_index = 0; 2276 for (i = 1; i < per_dqs; i++) { 2277 mid = left_edge[i] - right_edge[i]; 2278 if (mid < *mid_min) { 2279 *mid_min = mid; 2280 min_index = i; 2281 } 2282 } 2283 2284 /* 2285 * -mid_min/2 represents the amount that we need to move DQS. 2286 * If mid_min is odd and positive we'll need to add one to make 2287 * sure the rounding in further calculations is correct (always 2288 * bias to the right), so just add 1 for all positive values. 2289 */ 2290 if (*mid_min > 0) 2291 (*mid_min)++; 2292 *mid_min = *mid_min / 2; 2293 2294 debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n", 2295 __func__, __LINE__, *mid_min, min_index); 2296 return min_index; 2297 } 2298 2299 /** 2300 * center_dq_windows() - Center the DQ/DQS windows 2301 * @write: Perform read (Stage 2) or write (Stage 3) calibration 2302 * @left_edge: Left edge of the DQ/DQS phase 2303 * @right_edge: Right edge of the DQ/DQS phase 2304 * @mid_min: Adjusted DQ/DQS phase middle setting 2305 * @orig_mid_min: Original DQ/DQS phase middle setting 2306 * @min_index: DQ/DQS phase middle setting index 2307 * @test_bgn: Rank number to begin the test 2308 * @dq_margin: Amount of shift for the DQ 2309 * @dqs_margin: Amount of shift for the DQS 2310 * 2311 * Align the DQ/DQS windows in each group. 2312 */ 2313 static void center_dq_windows(const int write, int *left_edge, int *right_edge, 2314 const int mid_min, const int orig_mid_min, 2315 const int min_index, const int test_bgn, 2316 int *dq_margin, int *dqs_margin) 2317 { 2318 const s32 delay_max = write ? iocfg->io_out1_delay_max : 2319 iocfg->io_in_delay_max; 2320 const s32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : 2321 rwcfg->mem_dq_per_read_dqs; 2322 const s32 delay_off = write ? 
SCC_MGR_IO_OUT1_DELAY_OFFSET : 2323 SCC_MGR_IO_IN_DELAY_OFFSET; 2324 const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off; 2325 2326 s32 temp_dq_io_delay1; 2327 int shift_dq, i, p; 2328 2329 /* Initialize data for export structures */ 2330 *dqs_margin = delay_max + 1; 2331 *dq_margin = delay_max + 1; 2332 2333 /* add delay to bring centre of all DQ windows to the same "level" */ 2334 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) { 2335 /* Use values before divide by 2 to reduce round off error */ 2336 shift_dq = (left_edge[i] - right_edge[i] - 2337 (left_edge[min_index] - right_edge[min_index]))/2 + 2338 (orig_mid_min - mid_min); 2339 2340 debug_cond(DLEVEL >= 2, 2341 "vfifo_center: before: shift_dq[%u]=%d\n", 2342 i, shift_dq); 2343 2344 temp_dq_io_delay1 = readl(addr + (i << 2)); 2345 2346 if (shift_dq + temp_dq_io_delay1 > delay_max) 2347 shift_dq = delay_max - temp_dq_io_delay1; 2348 else if (shift_dq + temp_dq_io_delay1 < 0) 2349 shift_dq = -temp_dq_io_delay1; 2350 2351 debug_cond(DLEVEL >= 2, 2352 "vfifo_center: after: shift_dq[%u]=%d\n", 2353 i, shift_dq); 2354 2355 if (write) 2356 scc_mgr_set_dq_out1_delay(i, 2357 temp_dq_io_delay1 + shift_dq); 2358 else 2359 scc_mgr_set_dq_in_delay(p, 2360 temp_dq_io_delay1 + shift_dq); 2361 2362 scc_mgr_load_dq(p); 2363 2364 debug_cond(DLEVEL >= 2, 2365 "vfifo_center: margin[%u]=[%d,%d]\n", i, 2366 left_edge[i] - shift_dq + (-mid_min), 2367 right_edge[i] + shift_dq - (-mid_min)); 2368 2369 /* To determine values for export structures */ 2370 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin) 2371 *dq_margin = left_edge[i] - shift_dq + (-mid_min); 2372 2373 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin) 2374 *dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2375 } 2376 } 2377 2378 /** 2379 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering 2380 * @rank_bgn: Rank number 2381 * @rw_group: Read/Write Group 2382 * @test_bgn: Rank at which the test begins 2383 * @use_read_test: Perform a read test 2384 * @update_fom: Update FOM 2385 * 2386 * Per-bit deskew DQ and centering. 2387 */ 2388 static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, 2389 const u32 rw_group, const u32 test_bgn, 2390 const int use_read_test, const int update_fom) 2391 { 2392 const u32 addr = 2393 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET + 2394 (rw_group << 2); 2395 /* 2396 * Store these as signed since there are comparisons with 2397 * signed numbers. 
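* The left_edge[]/right_edge[] arrays below hold one entry per DQ bit in the read group (rwcfg->mem_dq_per_read_dqs entries).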
2398 */ 2399 u32 sticky_bit_chk; 2400 int32_t left_edge[rwcfg->mem_dq_per_read_dqs]; 2401 int32_t right_edge[rwcfg->mem_dq_per_read_dqs]; 2402 int32_t orig_mid_min, mid_min; 2403 int32_t new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en; 2404 int32_t dq_margin, dqs_margin; 2405 int i, min_index; 2406 int ret; 2407 2408 debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn); 2409 2410 start_dqs = readl(addr); 2411 if (iocfg->shift_dqs_en_when_shift_dqs) 2412 start_dqs_en = readl(addr - iocfg->dqs_en_delay_offset); 2413 2414 /* set the left and right edge of each bit to an illegal value */ 2415 /* use (iocfg->io_in_delay_max + 1) as an illegal value */ 2416 sticky_bit_chk = 0; 2417 for (i = 0; i < rwcfg->mem_dq_per_read_dqs; i++) { 2418 left_edge[i] = iocfg->io_in_delay_max + 1; 2419 right_edge[i] = iocfg->io_in_delay_max + 1; 2420 } 2421 2422 /* Search for the left edge of the window for each bit */ 2423 search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn, 2424 &sticky_bit_chk, 2425 left_edge, right_edge, use_read_test); 2426 2427 2428 /* Search for the right edge of the window for each bit */ 2429 ret = search_right_edge(0, rank_bgn, rw_group, rw_group, 2430 start_dqs, start_dqs_en, 2431 &sticky_bit_chk, 2432 left_edge, right_edge, use_read_test); 2433 if (ret) { 2434 /* 2435 * Restore delay chain settings before letting the loop 2436 * in rw_mgr_mem_calibrate_vfifo to retry different 2437 * dqs/ck relationships. 2438 */ 2439 scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs); 2440 if (iocfg->shift_dqs_en_when_shift_dqs) 2441 scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en); 2442 2443 scc_mgr_load_dqs(rw_group); 2444 writel(0, &sdr_scc_mgr->update); 2445 2446 debug_cond(DLEVEL >= 1, 2447 "%s:%d vfifo_center: failed to find edge [%u]: %d %d", 2448 __func__, __LINE__, i, left_edge[i], right_edge[i]); 2449 if (use_read_test) { 2450 set_failing_group_stage(rw_group * 2451 rwcfg->mem_dq_per_read_dqs + i, 2452 CAL_STAGE_VFIFO, 2453 CAL_SUBSTAGE_VFIFO_CENTER); 2454 } else { 2455 set_failing_group_stage(rw_group * 2456 rwcfg->mem_dq_per_read_dqs + i, 2457 CAL_STAGE_VFIFO_AFTER_WRITES, 2458 CAL_SUBSTAGE_VFIFO_CENTER); 2459 } 2460 return -EIO; 2461 } 2462 2463 min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min); 2464 2465 /* Determine the amount we can change DQS (which is -mid_min) */ 2466 orig_mid_min = mid_min; 2467 new_dqs = start_dqs - mid_min; 2468 if (new_dqs > iocfg->dqs_in_delay_max) 2469 new_dqs = iocfg->dqs_in_delay_max; 2470 else if (new_dqs < 0) 2471 new_dqs = 0; 2472 2473 mid_min = start_dqs - new_dqs; 2474 debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n", 2475 mid_min, new_dqs); 2476 2477 if (iocfg->shift_dqs_en_when_shift_dqs) { 2478 if (start_dqs_en - mid_min > iocfg->dqs_en_delay_max) 2479 mid_min += start_dqs_en - mid_min - 2480 iocfg->dqs_en_delay_max; 2481 else if (start_dqs_en - mid_min < 0) 2482 mid_min += start_dqs_en - mid_min; 2483 } 2484 new_dqs = start_dqs - mid_min; 2485 2486 debug_cond(DLEVEL >= 1, 2487 "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n", 2488 start_dqs, 2489 iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1, 2490 new_dqs, mid_min); 2491 2492 /* Add delay to bring centre of all DQ windows to the same "level". 
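* center_dq_windows() shifts each DQ bit so the window centres line up and reports the worst-case margins back through dq_margin/dqs_margin.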
*/ 2493 center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min, 2494 min_index, test_bgn, &dq_margin, &dqs_margin); 2495 2496 /* Move DQS-en */ 2497 if (iocfg->shift_dqs_en_when_shift_dqs) { 2498 final_dqs_en = start_dqs_en - mid_min; 2499 scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en); 2500 scc_mgr_load_dqs(rw_group); 2501 } 2502 2503 /* Move DQS */ 2504 scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs); 2505 scc_mgr_load_dqs(rw_group); 2506 debug_cond(DLEVEL >= 2, 2507 "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d", 2508 __func__, __LINE__, dq_margin, dqs_margin); 2509 2510 /* 2511 * Do not remove this line as it makes sure all of our decisions 2512 * have been applied. Apply the update bit. 2513 */ 2514 writel(0, &sdr_scc_mgr->update); 2515 2516 if ((dq_margin < 0) || (dqs_margin < 0)) 2517 return -EINVAL; 2518 2519 return 0; 2520 } 2521 2522 /** 2523 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device 2524 * @rw_group: Read/Write Group 2525 * @phase: DQ/DQS phase 2526 * 2527 * Because initially no communication can be reliably performed with the memory 2528 * device, the sequencer uses a guaranteed write mechanism to write data into 2529 * the memory device. 2530 */ 2531 static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group, 2532 const u32 phase) 2533 { 2534 int ret; 2535 2536 /* Set a particular DQ/DQS phase. */ 2537 scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase); 2538 2539 debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n", 2540 __func__, __LINE__, rw_group, phase); 2541 2542 /* 2543 * Altera EMI_RM 2015.05.04 :: Figure 1-25 2544 * Load up the patterns used by read calibration using the 2545 * current DQDQS phase. 2546 */ 2547 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2548 2549 if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ) 2550 return 0; 2551 2552 /* 2553 * Altera EMI_RM 2015.05.04 :: Figure 1-26 2554 * Back-to-Back reads of the patterns used for calibration. 2555 */ 2556 ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1); 2557 if (ret) 2558 debug_cond(DLEVEL >= 1, 2559 "%s:%d Guaranteed read test failed: g=%u p=%u\n", 2560 __func__, __LINE__, rw_group, phase); 2561 return ret; 2562 } 2563 2564 /** 2565 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration 2566 * @rw_group: Read/Write Group 2567 * @test_bgn: Rank at which the test begins 2568 * 2569 * DQS enable calibration ensures reliable capture of the DQ signal without 2570 * glitches on the DQS line. 2571 */ 2572 static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group, 2573 const u32 test_bgn) 2574 { 2575 /* 2576 * Altera EMI_RM 2015.05.04 :: Figure 1-27 2577 * DQS and DQS Enable Signal Relationships. 2578 */ 2579 2580 /* We start at zero, so have one less dq to divide among */ 2581 const u32 delay_step = iocfg->io_in_delay_max / 2582 (rwcfg->mem_dq_per_read_dqs - 1); 2583 int ret; 2584 u32 i, p, d, r; 2585 2586 debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn); 2587 2588 /* Try different dq_in_delays since the DQ path is shorter than DQS.
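* Each DQ bit in the group is programmed with a progressively larger input delay (d grows by delay_step per bit), so at least one bit should line up well with DQS during the phase sweep that follows.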
*/ 2589 for (r = 0; r < rwcfg->mem_number_of_ranks; 2590 r += NUM_RANKS_PER_SHADOW_REG) { 2591 for (i = 0, p = test_bgn, d = 0; 2592 i < rwcfg->mem_dq_per_read_dqs; 2593 i++, p++, d += delay_step) { 2594 debug_cond(DLEVEL >= 1, 2595 "%s:%d: g=%u r=%u i=%u p=%u d=%u\n", 2596 __func__, __LINE__, rw_group, r, i, p, d); 2597 2598 scc_mgr_set_dq_in_delay(p, d); 2599 scc_mgr_load_dq(p); 2600 } 2601 2602 writel(0, &sdr_scc_mgr->update); 2603 } 2604 2605 /* 2606 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different 2607 * dq_in_delay values 2608 */ 2609 ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group); 2610 2611 debug_cond(DLEVEL >= 1, 2612 "%s:%d: g=%u found=%u; Resetting delay chain to zero\n", 2613 __func__, __LINE__, rw_group, !ret); 2614 2615 for (r = 0; r < rwcfg->mem_number_of_ranks; 2616 r += NUM_RANKS_PER_SHADOW_REG) { 2617 scc_mgr_apply_group_dq_in_delay(test_bgn, 0); 2618 writel(0, &sdr_scc_mgr->update); 2619 } 2620 2621 return ret; 2622 } 2623 2624 /** 2625 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS 2626 * @rw_group: Read/Write Group 2627 * @test_bgn: Rank at which the test begins 2628 * @use_read_test: Perform a read test 2629 * @update_fom: Update FOM 2630 * 2631 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads 2632 * within a group. 2633 */ 2634 static int 2635 rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn, 2636 const int use_read_test, 2637 const int update_fom) 2638 2639 { 2640 int ret, grp_calibrated; 2641 u32 rank_bgn, sr; 2642 2643 /* 2644 * Altera EMI_RM 2015.05.04 :: Figure 1-28 2645 * Read per-bit deskew can be done on a per shadow register basis. 2646 */ 2647 grp_calibrated = 1; 2648 for (rank_bgn = 0, sr = 0; 2649 rank_bgn < rwcfg->mem_number_of_ranks; 2650 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) { 2651 ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group, 2652 test_bgn, 2653 use_read_test, 2654 update_fom); 2655 if (!ret) 2656 continue; 2657 2658 grp_calibrated = 0; 2659 } 2660 2661 if (!grp_calibrated) 2662 return -EIO; 2663 2664 return 0; 2665 } 2666 2667 /** 2668 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO 2669 * @rw_group: Read/Write Group 2670 * @test_bgn: Rank at which the test begins 2671 * 2672 * Stage 1: Calibrate the read valid prediction FIFO. 2673 * 2674 * This function implements UniPHY calibration Stage 1, as explained in 2675 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". 2676 * 2677 * - read valid prediction will consist of finding: 2678 * - DQS enable phase and DQS enable delay (DQS Enable Calibration) 2679 * - DQS input phase and DQS input delay (DQ/DQS Centering) 2680 * - we also do a per-bit deskew on the DQ lines. 2681 */ 2682 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn) 2683 { 2684 u32 p, d; 2685 u32 dtaps_per_ptap; 2686 u32 failed_substage; 2687 2688 int ret; 2689 2690 debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn); 2691 2692 /* Update info for sims */ 2693 reg_file_set_group(rw_group); 2694 reg_file_set_stage(CAL_STAGE_VFIFO); 2695 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); 2696 2697 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; 2698 2699 /* USER Determine number of delay taps for each phase tap.
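* (Hypothetical example, not generated values: a 300 ps phase tap and a 25 ps DQS enable delay-chain tap would give DIV_ROUND_UP(300, 25) - 1 = 11 delay taps per phase tap.)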
*/ 2700 dtaps_per_ptap = DIV_ROUND_UP(iocfg->delay_per_opa_tap, 2701 iocfg->delay_per_dqs_en_dchain_tap) - 1; 2702 2703 for (d = 0; d <= dtaps_per_ptap; d += 2) { 2704 /* 2705 * In RLDRAMX we may be messing the delay of pins in 2706 * the same write rw_group but outside of the current read 2707 * the rw_group, but that's ok because we haven't calibrated 2708 * output side yet. 2709 */ 2710 if (d > 0) { 2711 scc_mgr_apply_group_all_out_delay_add_all_ranks( 2712 rw_group, d); 2713 } 2714 2715 for (p = 0; p <= iocfg->dqdqs_out_phase_max; p++) { 2716 /* 1) Guaranteed Write */ 2717 ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p); 2718 if (ret) 2719 break; 2720 2721 /* 2) DQS Enable Calibration */ 2722 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group, 2723 test_bgn); 2724 if (ret) { 2725 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; 2726 continue; 2727 } 2728 2729 /* 3) Centering DQ/DQS */ 2730 /* 2731 * If doing read after write calibration, do not update 2732 * FOM now. Do it then. 2733 */ 2734 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, 2735 test_bgn, 1, 0); 2736 if (ret) { 2737 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER; 2738 continue; 2739 } 2740 2741 /* All done. */ 2742 goto cal_done_ok; 2743 } 2744 } 2745 2746 /* Calibration Stage 1 failed. */ 2747 set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage); 2748 return 0; 2749 2750 /* Calibration Stage 1 completed OK. */ 2751 cal_done_ok: 2752 /* 2753 * Reset the delay chains back to zero if they have moved > 1 2754 * (check for > 1 because loop will increase d even when pass in 2755 * first case). 2756 */ 2757 if (d > 2) 2758 scc_mgr_zero_group(rw_group, 1); 2759 2760 return 1; 2761 } 2762 2763 /** 2764 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering. 2765 * @rw_group: Read/Write Group 2766 * @test_bgn: Rank at which the test begins 2767 * 2768 * Stage 3: DQ/DQS Centering. 2769 * 2770 * This function implements UniPHY calibration Stage 3, as explained in 2771 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". 2772 */ 2773 static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group, 2774 const u32 test_bgn) 2775 { 2776 int ret; 2777 2778 debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn); 2779 2780 /* Update info for sims. */ 2781 reg_file_set_group(rw_group); 2782 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); 2783 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 2784 2785 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1); 2786 if (ret) 2787 set_failing_group_stage(rw_group, 2788 CAL_STAGE_VFIFO_AFTER_WRITES, 2789 CAL_SUBSTAGE_VFIFO_CENTER); 2790 return ret; 2791 } 2792 2793 /** 2794 * rw_mgr_mem_calibrate_lfifo() - Minimize latency 2795 * 2796 * Stage 4: Minimize latency. 2797 * 2798 * This function implements UniPHY calibration Stage 4, as explained in 2799 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". 2800 * Calibrate LFIFO to find smallest read latency. 2801 */ 2802 static u32 rw_mgr_mem_calibrate_lfifo(void) 2803 { 2804 int found_one = 0; 2805 2806 debug("%s:%d\n", __func__, __LINE__); 2807 2808 /* Update info for sims. 
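* (The loop below then walks gbl->curr_read_lat down from its initial, conservative value until the read test fails, and adds a fudge factor of 2 back onto the failing value.)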
*/ 2809 reg_file_set_stage(CAL_STAGE_LFIFO); 2810 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); 2811 2812 /* Load up the patterns used by read calibration for all ranks */ 2813 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2814 2815 do { 2816 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2817 debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u", 2818 __func__, __LINE__, gbl->curr_read_lat); 2819 2820 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS, 2821 PASS_ALL_BITS, 1)) 2822 break; 2823 2824 found_one = 1; 2825 /* 2826 * Reduce read latency and see if things are 2827 * working correctly. 2828 */ 2829 gbl->curr_read_lat--; 2830 } while (gbl->curr_read_lat > 0); 2831 2832 /* Reset the fifos to get pointers to known state. */ 2833 writel(0, &phy_mgr_cmd->fifo_reset); 2834 2835 if (found_one) { 2836 /* Add a fudge factor to the read latency that was determined */ 2837 gbl->curr_read_lat += 2; 2838 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2839 debug_cond(DLEVEL >= 2, 2840 "%s:%d lfifo: success: using read_lat=%u\n", 2841 __func__, __LINE__, gbl->curr_read_lat); 2842 } else { 2843 set_failing_group_stage(0xff, CAL_STAGE_LFIFO, 2844 CAL_SUBSTAGE_READ_LATENCY); 2845 2846 debug_cond(DLEVEL >= 2, 2847 "%s:%d lfifo: failed at initial read_lat=%u\n", 2848 __func__, __LINE__, gbl->curr_read_lat); 2849 } 2850 2851 return found_one; 2852 } 2853 2854 /** 2855 * search_window() - Search for the/part of the window with DM/DQS shift 2856 * @search_dm: If 1, search for the DM shift, if 0, search for DQS shift 2857 * @rank_bgn: Rank number 2858 * @write_group: Write Group 2859 * @bgn_curr: Current window begin 2860 * @end_curr: Current window end 2861 * @bgn_best: Current best window begin 2862 * @end_best: Current best window end 2863 * @win_best: Size of the best window 2864 * @new_dqs: New DQS value (only applicable if search_dm = 0). 2865 * 2866 * Search for the/part of the window with DM/DQS shift. 2867 */ 2868 static void search_window(const int search_dm, 2869 const u32 rank_bgn, const u32 write_group, 2870 int *bgn_curr, int *end_curr, int *bgn_best, 2871 int *end_best, int *win_best, int new_dqs) 2872 { 2873 u32 bit_chk; 2874 const int max = iocfg->io_out1_delay_max - new_dqs; 2875 int d, di; 2876 2877 /* Search for the/part of the window with DM/DQS shift. */ 2878 for (di = max; di >= 0; di -= DELTA_D) { 2879 if (search_dm) { 2880 d = di; 2881 scc_mgr_apply_group_dm_out1_delay(d); 2882 } else { 2883 /* For DQS, we go from 0...max */ 2884 d = max - di; 2885 /* 2886 * Note: This only shifts DQS, so are we limiting 2887 * ourselves to width of DQ unnecessarily. 2888 */ 2889 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 2890 d + new_dqs); 2891 } 2892 2893 writel(0, &sdr_scc_mgr->update); 2894 2895 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, 2896 PASS_ALL_BITS, &bit_chk, 2897 0)) { 2898 /* Set current end of the window. */ 2899 *end_curr = search_dm ? -d : d; 2900 2901 /* 2902 * If a starting edge of our window has not been seen 2903 * this is our current start of the DM window. 2904 */ 2905 if (*bgn_curr == iocfg->io_out1_delay_max + 1) 2906 *bgn_curr = search_dm ? -d : d; 2907 2908 /* 2909 * If current window is bigger than best seen. 2910 * Set best seen to be current window. 2911 */ 2912 if ((*end_curr - *bgn_curr + 1) > *win_best) { 2913 *win_best = *end_curr - *bgn_curr + 1; 2914 *bgn_best = *bgn_curr; 2915 *end_best = *end_curr; 2916 } 2917 } else { 2918 /* We just saw a failing test. Reset temp edge. 
*/ 2919 *bgn_curr = iocfg->io_out1_delay_max + 1; 2920 *end_curr = iocfg->io_out1_delay_max + 1; 2921 2922 /* Early exit is only applicable to DQS. */ 2923 if (search_dm) 2924 continue; 2925 2926 /* 2927 * Early exit optimization: if the remaining delay 2928 * chain space is less than already seen largest 2929 * window we can exit. 2930 */ 2931 if (*win_best - 1 > iocfg->io_out1_delay_max - new_dqs - d) 2932 break; 2933 } 2934 } 2935 } 2936 2937 /* 2938 * rw_mgr_mem_calibrate_writes_center() - Center all windows 2939 * @rank_bgn: Rank number 2940 * @write_group: Write group 2941 * @test_bgn: Rank at which the test begins 2942 * 2943 * Center all windows. Do per-bit-deskew to possibly increase size of 2944 * certain windows. 2945 */ 2946 static int 2947 rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, 2948 const u32 test_bgn) 2949 { 2950 int i; 2951 u32 sticky_bit_chk; 2952 u32 min_index; 2953 int left_edge[rwcfg->mem_dq_per_write_dqs]; 2954 int right_edge[rwcfg->mem_dq_per_write_dqs]; 2955 int mid; 2956 int mid_min, orig_mid_min; 2957 int new_dqs, start_dqs; 2958 int dq_margin, dqs_margin, dm_margin; 2959 int bgn_curr = iocfg->io_out1_delay_max + 1; 2960 int end_curr = iocfg->io_out1_delay_max + 1; 2961 int bgn_best = iocfg->io_out1_delay_max + 1; 2962 int end_best = iocfg->io_out1_delay_max + 1; 2963 int win_best = 0; 2964 2965 int ret; 2966 2967 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn); 2968 2969 dm_margin = 0; 2970 2971 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS | 2972 SCC_MGR_IO_OUT1_DELAY_OFFSET) + 2973 (rwcfg->mem_dq_per_write_dqs << 2)); 2974 2975 /* Per-bit deskew. */ 2976 2977 /* 2978 * Set the left and right edge of each bit to an illegal value. 2979 * Use (iocfg->io_out1_delay_max + 1) as an illegal value. 2980 */ 2981 sticky_bit_chk = 0; 2982 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) { 2983 left_edge[i] = iocfg->io_out1_delay_max + 1; 2984 right_edge[i] = iocfg->io_out1_delay_max + 1; 2985 } 2986 2987 /* Search for the left edge of the window for each bit. */ 2988 search_left_edge(1, rank_bgn, write_group, 0, test_bgn, 2989 &sticky_bit_chk, 2990 left_edge, right_edge, 0); 2991 2992 /* Search for the right edge of the window for each bit. */ 2993 ret = search_right_edge(1, rank_bgn, write_group, 0, 2994 start_dqs, 0, 2995 &sticky_bit_chk, 2996 left_edge, right_edge, 0); 2997 if (ret) { 2998 set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES, 2999 CAL_SUBSTAGE_WRITES_CENTER); 3000 return -EINVAL; 3001 } 3002 3003 min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min); 3004 3005 /* Determine the amount we can change DQS (which is -mid_min). */ 3006 orig_mid_min = mid_min; 3007 new_dqs = start_dqs; 3008 mid_min = 0; 3009 debug_cond(DLEVEL >= 1, 3010 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n", 3011 __func__, __LINE__, start_dqs, new_dqs, mid_min); 3012 3013 /* Add delay to bring centre of all DQ windows to the same "level". */ 3014 center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min, 3015 min_index, 0, &dq_margin, &dqs_margin); 3016 3017 /* Move DQS */ 3018 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 3019 writel(0, &sdr_scc_mgr->update); 3020 3021 /* Centre DM */ 3022 debug_cond(DLEVEL >= 2, "%s:%d write_center: DM\n", __func__, __LINE__); 3023 3024 /* 3025 * Set the left and right edge of each bit to an illegal value. 3026 * Use (iocfg->io_out1_delay_max + 1) as an illegal value. 
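* Unlike the per-bit deskew above, DM has a single window, so only index 0 of the edge arrays is used here.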
3027 */ 3028 left_edge[0] = iocfg->io_out1_delay_max + 1; 3029 right_edge[0] = iocfg->io_out1_delay_max + 1; 3030 3031 /* Search for the/part of the window with DM shift. */ 3032 search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr, 3033 &bgn_best, &end_best, &win_best, 0); 3034 3035 /* Reset DM delay chains to 0. */ 3036 scc_mgr_apply_group_dm_out1_delay(0); 3037 3038 /* 3039 * Check to see if the current window nudges up against 0 delay. 3040 * If so, we need to continue the search by shifting DQS; otherwise the 3041 * DQS search begins as a new search. 3042 */ 3043 if (end_curr != 0) { 3044 bgn_curr = iocfg->io_out1_delay_max + 1; 3045 end_curr = iocfg->io_out1_delay_max + 1; 3046 } 3047 3048 /* Search for the/part of the window with DQS shifts. */ 3049 search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr, 3050 &bgn_best, &end_best, &win_best, new_dqs); 3051 3052 /* Assign left and right edge for cal and reporting. */ 3053 left_edge[0] = -1 * bgn_best; 3054 right_edge[0] = end_best; 3055 3056 debug_cond(DLEVEL >= 2, "%s:%d dm_calib: left=%d right=%d\n", 3057 __func__, __LINE__, left_edge[0], right_edge[0]); 3058 3059 /* Move DQS (back to orig). */ 3060 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 3061 3062 /* Move DM */ 3063 3064 /* Find middle of window for the DM bit. */ 3065 mid = (left_edge[0] - right_edge[0]) / 2; 3066 3067 /* Only move right, since we are not moving DQS/DQ. */ 3068 if (mid < 0) 3069 mid = 0; 3070 3071 /* dm_margin should fail if we never find a window. */ 3072 if (win_best == 0) 3073 dm_margin = -1; 3074 else 3075 dm_margin = left_edge[0] - mid; 3076 3077 scc_mgr_apply_group_dm_out1_delay(mid); 3078 writel(0, &sdr_scc_mgr->update); 3079 3080 debug_cond(DLEVEL >= 2, 3081 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n", 3082 __func__, __LINE__, left_edge[0], right_edge[0], 3083 mid, dm_margin); 3084 /* Export values. */ 3085 gbl->fom_out += dq_margin + dqs_margin; 3086 3087 debug_cond(DLEVEL >= 2, 3088 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n", 3089 __func__, __LINE__, dq_margin, dqs_margin, dm_margin); 3090 3091 /* 3092 * Do not remove this line as it makes sure all of our 3093 * decisions have been applied. 3094 */ 3095 writel(0, &sdr_scc_mgr->update); 3096 3097 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0)) 3098 return -EINVAL; 3099 3100 return 0; 3101 } 3102 3103 /** 3104 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One 3105 * @rank_bgn: Rank number 3106 * @group: Read/Write Group 3107 * @test_bgn: Rank at which the test begins 3108 * 3109 * Stage 2: Write Calibration Part One. 3110 * 3111 * This function implements UniPHY calibration Stage 2, as explained in 3112 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". 3113 */ 3114 static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group, 3115 const u32 test_bgn) 3116 { 3117 int ret; 3118 3119 /* Update info for sims */ 3120 debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn); 3121 3122 reg_file_set_group(group); 3123 reg_file_set_stage(CAL_STAGE_WRITES); 3124 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); 3125 3126 ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn); 3127 if (ret) 3128 set_failing_group_stage(group, CAL_STAGE_WRITES, 3129 CAL_SUBSTAGE_WRITES_CENTER); 3130 3131 return ret; 3132 } 3133 3134 /** 3135 * mem_precharge_and_activate() - Precharge all banks and activate 3136 * 3137 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
*/ 3139 static void mem_precharge_and_activate(void) 3140 { 3141 int r; 3142 3143 for (r = 0; r < rwcfg->mem_number_of_ranks; r++) { 3144 /* Set rank. */ 3145 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); 3146 3147 /* Precharge all banks. */ 3148 writel(rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3149 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3150 3151 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0); 3152 writel(rwcfg->activate_0_and_1_wait1, 3153 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 3154 3155 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1); 3156 writel(rwcfg->activate_0_and_1_wait2, 3157 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 3158 3159 /* Activate rows. */ 3160 writel(rwcfg->activate_0_and_1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3161 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3162 } 3163 } 3164 3165 /** 3166 * mem_init_latency() - Configure memory RLAT and WLAT settings 3167 * 3168 * Configure memory RLAT and WLAT parameters. 3169 */ 3170 static void mem_init_latency(void) 3171 { 3172 /* 3173 * For AV/CV, LFIFO is hardened and always runs at full rate 3174 * so max latency in AFI clocks, used here, is correspondingly 3175 * smaller. 3176 */ 3177 const u32 max_latency = (1 << misccfg->max_latency_count_width) - 1; 3178 u32 rlat, wlat; 3179 3180 debug("%s:%d\n", __func__, __LINE__); 3181 3182 /* 3183 * Read in write latency. 3184 * WL for Hard PHY does not include additive latency. 3185 */ 3186 wlat = readl(&data_mgr->t_wl_add); 3187 wlat += readl(&data_mgr->mem_t_add); 3188 3189 gbl->rw_wl_nop_cycles = wlat - 1; 3190 3191 /* Read in read latency. */ 3192 rlat = readl(&data_mgr->t_rl_add); 3193 3194 /* Set a pretty high read latency initially. */ 3195 gbl->curr_read_lat = rlat + 16; 3196 if (gbl->curr_read_lat > max_latency) 3197 gbl->curr_read_lat = max_latency; 3198 3199 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3200 3201 /* Advertise write latency. */ 3202 writel(wlat, &phy_mgr_cfg->afi_wlat); 3203 } 3204 3205 /** 3206 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings 3207 * 3208 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode. 3209 */ 3210 static void mem_skip_calibrate(void) 3211 { 3212 u32 vfifo_offset; 3213 u32 i, j, r; 3214 3215 debug("%s:%d\n", __func__, __LINE__); 3216 /* Need to update every shadow register set used by the interface */ 3217 for (r = 0; r < rwcfg->mem_number_of_ranks; 3218 r += NUM_RANKS_PER_SHADOW_REG) { 3219 /* 3220 * Set output phase alignment settings appropriate for 3221 * skip calibration. 3222 */ 3223 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { 3224 scc_mgr_set_dqs_en_phase(i, 0); 3225 if (iocfg->dll_chain_length == 6) 3226 scc_mgr_set_dqdqs_output_phase(i, 6); 3227 else 3228 scc_mgr_set_dqdqs_output_phase(i, 7); 3229 /* 3230 * Case:33398 3231 * 3232 * Write data arrives at the I/O two cycles before write 3233 * latency is reached (720 deg). 3234 * -> due to bit-slip in a/c bus 3235 * -> to allow board skew where dqs is longer than ck 3236 * -> how often can this happen!? 3237 * -> can claim back some ptaps for high freq 3238 * support if we can relax this, but I digress...
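* (A numeric sanity check of the formula derived below, using a hypothetical DLL chain length of 8: each ptap is 360/8 = 45 deg, the required DQS delay is 720 - 90 - 180 - 2 * 45 = 360 deg, i.e. 8 ptaps, and 1.25 * 8 - 2 = 8 as well.)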
3239 * 3240 * The write_clk leads mem_ck by 90 deg 3241 * The minimum ptap of the OPA is 180 deg 3242 * Each ptap has (360 / iocfg->dll_chain_length) deg of delay 3243 * The write_clk is always delayed by 2 ptaps 3244 * 3245 * Hence, to make DQS aligned to CK, we need to delay 3246 * DQS by (in degrees): 3247 * (720 - 90 - 180 - 2 * 3248 * (360 / iocfg->dll_chain_length)) 3249 * 3250 * Dividing the above by (360 / iocfg->dll_chain_length) 3251 * gives us the number of ptaps, which simplifies to: 3252 * 3253 * (1.25 * iocfg->dll_chain_length - 2) 3254 */ 3255 scc_mgr_set_dqdqs_output_phase(i, 3256 ((125 * iocfg->dll_chain_length) / 100) - 2); 3257 } 3258 writel(0xff, &sdr_scc_mgr->dqs_ena); 3259 writel(0xff, &sdr_scc_mgr->dqs_io_ena); 3260 3261 for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) { 3262 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3263 SCC_MGR_GROUP_COUNTER_OFFSET); 3264 } 3265 writel(0xff, &sdr_scc_mgr->dq_ena); 3266 writel(0xff, &sdr_scc_mgr->dm_ena); 3267 writel(0, &sdr_scc_mgr->update); 3268 } 3269 3270 /* Compensate for simulation model behaviour */ 3271 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { 3272 scc_mgr_set_dqs_bus_in_delay(i, 10); 3273 scc_mgr_load_dqs(i); 3274 } 3275 writel(0, &sdr_scc_mgr->update); 3276 3277 /* 3278 * ArriaV has hard FIFOs that can only be initialized by incrementing 3279 * in sequencer. 3280 */ 3281 vfifo_offset = misccfg->calib_vfifo_offset; 3282 for (j = 0; j < vfifo_offset; j++) 3283 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); 3284 writel(0, &phy_mgr_cmd->fifo_reset); 3285 3286 /* 3287 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal 3288 * setting from generation-time constant. 3289 */ 3290 gbl->curr_read_lat = misccfg->calib_lfifo_offset; 3291 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3292 } 3293 3294 /** 3295 * mem_calibrate() - Memory calibration entry point. 3296 * 3297 * Perform memory calibration. 3298 */ 3299 static u32 mem_calibrate(void) 3300 { 3301 u32 i; 3302 u32 rank_bgn, sr; 3303 u32 write_group, write_test_bgn; 3304 u32 read_group, read_test_bgn; 3305 u32 run_groups, current_run; 3306 u32 failing_groups = 0; 3307 u32 group_failed = 0; 3308 3309 const u32 rwdqs_ratio = rwcfg->mem_if_read_dqs_width / 3310 rwcfg->mem_if_write_dqs_width; 3311 3312 debug("%s:%d\n", __func__, __LINE__); 3313 3314 /* Initialize the data settings */ 3315 gbl->error_substage = CAL_SUBSTAGE_NIL; 3316 gbl->error_stage = CAL_STAGE_NIL; 3317 gbl->error_group = 0xff; 3318 gbl->fom_in = 0; 3319 gbl->fom_out = 0; 3320 3321 /* Initialize WLAT and RLAT. */ 3322 mem_init_latency(); 3323 3324 /* Initialize bit slips. */ 3325 mem_precharge_and_activate(); 3326 3327 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { 3328 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3329 SCC_MGR_GROUP_COUNTER_OFFSET); 3330 /* Only needed once to set all groups, pins, DQ, DQS, DM. */ 3331 if (i == 0) 3332 scc_mgr_set_hhp_extras(); 3333 3334 scc_set_bypass_mode(i); 3335 } 3336 3337 /* Calibration is skipped. */ 3338 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { 3339 /* 3340 * Set VFIFO and LFIFO to instant-on settings in skip 3341 * calibration mode. 3342 */ 3343 mem_skip_calibrate(); 3344 3345 /* 3346 * Do not remove this line as it makes sure all of our 3347 * decisions have been applied. 3348 */ 3349 writel(0, &sdr_scc_mgr->update); 3350 return 1; 3351 } 3352 3353 /* Calibration is not skipped. */ 3354 for (i = 0; i < NUM_CALIB_REPEAT; i++) { 3355 /* 3356 * Zero all delay chain/phase settings for all 3357 * groups and all shadow register sets.
3358 */ 3359 scc_mgr_zero_all(); 3360 3361 run_groups = ~0; 3362 3363 for (write_group = 0, write_test_bgn = 0; write_group 3364 < rwcfg->mem_if_write_dqs_width; write_group++, 3365 write_test_bgn += rwcfg->mem_dq_per_write_dqs) { 3366 /* Initialize the group failure */ 3367 group_failed = 0; 3368 3369 current_run = run_groups & ((1 << 3370 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); 3371 run_groups = run_groups >> 3372 RW_MGR_NUM_DQS_PER_WRITE_GROUP; 3373 3374 if (current_run == 0) 3375 continue; 3376 3377 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | 3378 SCC_MGR_GROUP_COUNTER_OFFSET); 3379 scc_mgr_zero_group(write_group, 0); 3380 3381 for (read_group = write_group * rwdqs_ratio, 3382 read_test_bgn = 0; 3383 read_group < (write_group + 1) * rwdqs_ratio; 3384 read_group++, 3385 read_test_bgn += rwcfg->mem_dq_per_read_dqs) { 3386 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO) 3387 continue; 3388 3389 /* Calibrate the VFIFO */ 3390 if (rw_mgr_mem_calibrate_vfifo(read_group, 3391 read_test_bgn)) 3392 continue; 3393 3394 if (!(gbl->phy_debug_mode_flags & 3395 PHY_DEBUG_SWEEP_ALL_GROUPS)) 3396 return 0; 3397 3398 /* The group failed, we're done. */ 3399 goto grp_failed; 3400 } 3401 3402 /* Calibrate the output side */ 3403 for (rank_bgn = 0, sr = 0; 3404 rank_bgn < rwcfg->mem_number_of_ranks; 3405 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) { 3406 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) 3407 continue; 3408 3409 /* Not needed in quick mode! */ 3410 if (STATIC_CALIB_STEPS & 3411 CALIB_SKIP_DELAY_SWEEPS) 3412 continue; 3413 3414 /* Calibrate WRITEs */ 3415 if (!rw_mgr_mem_calibrate_writes(rank_bgn, 3416 write_group, 3417 write_test_bgn)) 3418 continue; 3419 3420 group_failed = 1; 3421 if (!(gbl->phy_debug_mode_flags & 3422 PHY_DEBUG_SWEEP_ALL_GROUPS)) 3423 return 0; 3424 } 3425 3426 /* Some group failed, we're done. */ 3427 if (group_failed) 3428 goto grp_failed; 3429 3430 for (read_group = write_group * rwdqs_ratio, 3431 read_test_bgn = 0; 3432 read_group < (write_group + 1) * rwdqs_ratio; 3433 read_group++, 3434 read_test_bgn += rwcfg->mem_dq_per_read_dqs) { 3435 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) 3436 continue; 3437 3438 if (!rw_mgr_mem_calibrate_vfifo_end(read_group, 3439 read_test_bgn)) 3440 continue; 3441 3442 if (!(gbl->phy_debug_mode_flags & 3443 PHY_DEBUG_SWEEP_ALL_GROUPS)) 3444 return 0; 3445 3446 /* The group failed, we're done. */ 3447 goto grp_failed; 3448 } 3449 3450 /* No group failed, continue as usual. */ 3451 continue; 3452 3453 grp_failed: /* A group failed, increment the counter. */ 3454 failing_groups++; 3455 } 3456 3457 /* 3458 * USER If there are any failing groups then report 3459 * the failure. 3460 */ 3461 if (failing_groups != 0) 3462 return 0; 3463 3464 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO) 3465 continue; 3466 3467 /* Calibrate the LFIFO */ 3468 if (!rw_mgr_mem_calibrate_lfifo()) 3469 return 0; 3470 } 3471 3472 /* 3473 * Do not remove this line as it makes sure all of our decisions 3474 * have been applied. 3475 */ 3476 writel(0, &sdr_scc_mgr->update); 3477 return 1; 3478 } 3479 3480 /** 3481 * run_mem_calibrate() - Perform memory calibration 3482 * 3483 * This function triggers the entire memory calibration procedure. 3484 */ 3485 static int run_mem_calibrate(void) 3486 { 3487 int pass; 3488 u32 ctrl_cfg; 3489 3490 debug("%s:%d\n", __func__, __LINE__); 3491 3492 /* Reset pass/fail status shown on afi_cal_success/fail */ 3493 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status); 3494 3495 /* Stop tracking manager. 
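* DQS tracking (DQSTRKEN) is disabled while calibration runs; the saved ctrl_cfg value is written back at the end of this function to re-enable it.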
*/ 3496 ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg); 3497 writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK, 3498 &sdr_ctrl->ctrl_cfg); 3499 3500 phy_mgr_initialize(); 3501 rw_mgr_mem_initialize(); 3502 3503 /* Perform the actual memory calibration. */ 3504 pass = mem_calibrate(); 3505 3506 mem_precharge_and_activate(); 3507 writel(0, &phy_mgr_cmd->fifo_reset); 3508 3509 /* Handoff. */ 3510 rw_mgr_mem_handoff(); 3511 /* 3512 * In Hard PHY this is a 2-bit control: 3513 * 0: AFI Mux Select 3514 * 1: DDIO Mux Select 3515 */ 3516 writel(0x2, &phy_mgr_cfg->mux_sel); 3517 3518 /* Start tracking manager. */ 3519 writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg); 3520 3521 return pass; 3522 } 3523 3524 /** 3525 * debug_mem_calibrate() - Report result of memory calibration 3526 * @pass: Value indicating whether calibration passed or failed 3527 * 3528 * This function reports the results of the memory calibration 3529 * and writes debug information into the register file. 3530 */ 3531 static void debug_mem_calibrate(int pass) 3532 { 3533 u32 debug_info; 3534 3535 if (pass) { 3536 debug("%s: CALIBRATION PASSED\n", __FILE__); 3537 3538 gbl->fom_in /= 2; 3539 gbl->fom_out /= 2; 3540 3541 if (gbl->fom_in > 0xff) 3542 gbl->fom_in = 0xff; 3543 3544 if (gbl->fom_out > 0xff) 3545 gbl->fom_out = 0xff; 3546 3547 /* Update the FOM in the register file */ 3548 debug_info = gbl->fom_in; 3549 debug_info |= gbl->fom_out << 8; 3550 writel(debug_info, &sdr_reg_file->fom); 3551 3552 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3553 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status); 3554 } else { 3555 debug("%s: CALIBRATION FAILED\n", __FILE__); 3556 3557 debug_info = gbl->error_stage; 3558 debug_info |= gbl->error_substage << 8; 3559 debug_info |= gbl->error_group << 16; 3560 3561 writel(debug_info, &sdr_reg_file->failing_stage); 3562 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3563 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status); 3564 3565 /* Update the failing group/stage in the register file */ 3566 debug_info = gbl->error_stage; 3567 debug_info |= gbl->error_substage << 8; 3568 debug_info |= gbl->error_group << 16; 3569 writel(debug_info, &sdr_reg_file->failing_stage); 3570 } 3571 3572 debug("%s: Calibration complete\n", __FILE__); 3573 } 3574 3575 /** 3576 * hc_initialize_rom_data() - Initialize ROM data 3577 * 3578 * Initialize ROM data. 3579 */ 3580 static void hc_initialize_rom_data(void) 3581 { 3582 unsigned int nelem = 0; 3583 const u32 *rom_init; 3584 u32 i, addr; 3585 3586 socfpga_get_seq_inst_init(&rom_init, &nelem); 3587 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET; 3588 for (i = 0; i < nelem; i++) 3589 writel(rom_init[i], addr + (i << 2)); 3590 3591 socfpga_get_seq_ac_init(&rom_init, &nelem); 3592 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET; 3593 for (i = 0; i < nelem; i++) 3594 writel(rom_init[i], addr + (i << 2)); 3595 } 3596 3597 /** 3598 * initialize_reg_file() - Initialize SDR register file 3599 * 3600 * Initialize SDR register file. 
3601 */ 3602 static void initialize_reg_file(void) 3603 { 3604 /* Initialize the register file with the correct data */ 3605 writel(misccfg->reg_file_init_seq_signature, &sdr_reg_file->signature); 3606 writel(0, &sdr_reg_file->debug_data_addr); 3607 writel(0, &sdr_reg_file->cur_stage); 3608 writel(0, &sdr_reg_file->fom); 3609 writel(0, &sdr_reg_file->failing_stage); 3610 writel(0, &sdr_reg_file->debug1); 3611 writel(0, &sdr_reg_file->debug2); 3612 } 3613 3614 /** 3615 * initialize_hps_phy() - Initialize HPS PHY 3616 * 3617 * Initialize HPS PHY. 3618 */ 3619 static void initialize_hps_phy(void) 3620 { 3621 u32 reg; 3622 /* 3623 * Tracking also gets configured here because it's in the 3624 * same register. 3625 */ 3626 u32 trk_sample_count = 7500; 3627 u32 trk_long_idle_sample_count = (10 << 16) | 100; 3628 /* 3629 * Format is number of outer loops in the 16 MSB, sample 3630 * count in 16 LSB. 3631 */ 3632 3633 reg = 0; 3634 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); 3635 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); 3636 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); 3637 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); 3638 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); 3639 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); 3640 /* 3641 * This field selects the intrinsic latency to RDATA_EN/FULL path. 3642 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 3643 */ 3644 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); 3645 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET( 3646 trk_sample_count); 3647 writel(reg, &sdr_ctrl->phy_ctrl0); 3648 3649 reg = 0; 3650 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET( 3651 trk_sample_count >> 3652 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); 3653 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET( 3654 trk_long_idle_sample_count); 3655 writel(reg, &sdr_ctrl->phy_ctrl1); 3656 3657 reg = 0; 3658 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET( 3659 trk_long_idle_sample_count >> 3660 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); 3661 writel(reg, &sdr_ctrl->phy_ctrl2); 3662 } 3663 3664 /** 3665 * initialize_tracking() - Initialize tracking 3666 * 3667 * Initialize the register file with usable initial data. 3668 */ 3669 static void initialize_tracking(void) 3670 { 3671 /* 3672 * Initialize the register file with the correct data. 3673 * Compute usable version of value in case we skip full 3674 * computation later. 
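* If calibration does run, rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() later overwrites this statically computed dtaps_per_ptap with the measured value.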
3675 */ 3676 writel(DIV_ROUND_UP(iocfg->delay_per_opa_tap, 3677 iocfg->delay_per_dchain_tap) - 1, 3678 &sdr_reg_file->dtaps_per_ptap); 3679 3680 /* trk_sample_count */ 3681 writel(7500, &sdr_reg_file->trk_sample_count); 3682 3683 /* longidle outer loop [15:0] */ 3684 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle); 3685 3686 /* 3687 * longidle sample count [31:24] 3688 * trfc, worst case of 933Mhz 4Gb [23:16] 3689 * trcd, worst case [15:8] 3690 * vfifo wait [7:0] 3691 */ 3692 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0), 3693 &sdr_reg_file->delays); 3694 3695 /* mux delay */ 3696 writel((rwcfg->idle << 24) | (rwcfg->activate_1 << 16) | 3697 (rwcfg->sgle_read << 8) | (rwcfg->precharge_all << 0), 3698 &sdr_reg_file->trk_rw_mgr_addr); 3699 3700 writel(rwcfg->mem_if_read_dqs_width, 3701 &sdr_reg_file->trk_read_dqs_width); 3702 3703 /* trefi [7:0] */ 3704 writel((rwcfg->refresh_all << 24) | (1000 << 0), 3705 &sdr_reg_file->trk_rfsh); 3706 } 3707 3708 int sdram_calibration_full(void) 3709 { 3710 struct param_type my_param; 3711 struct gbl_type my_gbl; 3712 u32 pass; 3713 3714 memset(&my_param, 0, sizeof(my_param)); 3715 memset(&my_gbl, 0, sizeof(my_gbl)); 3716 3717 param = &my_param; 3718 gbl = &my_gbl; 3719 3720 rwcfg = socfpga_get_sdram_rwmgr_config(); 3721 iocfg = socfpga_get_sdram_io_config(); 3722 misccfg = socfpga_get_sdram_misc_config(); 3723 3724 /* Set the calibration enabled by default */ 3725 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; 3726 /* 3727 * Only sweep all groups (regardless of fail state) by default 3728 * Set enabled read test by default. 3729 */ 3730 #if DISABLE_GUARANTEED_READ 3731 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; 3732 #endif 3733 /* Initialize the register file */ 3734 initialize_reg_file(); 3735 3736 /* Initialize any PHY CSR */ 3737 initialize_hps_phy(); 3738 3739 scc_mgr_initialize(); 3740 3741 initialize_tracking(); 3742 3743 debug("%s: Preparing to start memory calibration\n", __FILE__); 3744 3745 debug("%s:%d\n", __func__, __LINE__); 3746 debug_cond(DLEVEL >= 1, 3747 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", 3748 rwcfg->mem_number_of_ranks, rwcfg->mem_number_of_cs_per_dimm, 3749 rwcfg->mem_dq_per_read_dqs, rwcfg->mem_dq_per_write_dqs, 3750 rwcfg->mem_virtual_groups_per_read_dqs, 3751 rwcfg->mem_virtual_groups_per_write_dqs); 3752 debug_cond(DLEVEL >= 1, 3753 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", 3754 rwcfg->mem_if_read_dqs_width, rwcfg->mem_if_write_dqs_width, 3755 rwcfg->mem_data_width, rwcfg->mem_data_mask_width, 3756 iocfg->delay_per_opa_tap, iocfg->delay_per_dchain_tap); 3757 debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u", 3758 iocfg->delay_per_dqs_en_dchain_tap, iocfg->dll_chain_length); 3759 debug_cond(DLEVEL >= 1, 3760 "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", 3761 iocfg->dqs_en_phase_max, iocfg->dqdqs_out_phase_max, 3762 iocfg->dqs_en_delay_max, iocfg->dqs_in_delay_max); 3763 debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", 3764 iocfg->io_in_delay_max, iocfg->io_out1_delay_max, 3765 iocfg->io_out2_delay_max); 3766 debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", 3767 iocfg->dqs_in_reserve, iocfg->dqs_out_reserve); 3768 3769 hc_initialize_rom_data(); 3770 3771 /* update info for sims */ 3772 reg_file_set_stage(CAL_STAGE_NIL); 3773 reg_file_set_group(0); 3774 3775 /* 3776 * Load global needed for those actions that require 3777 * some dynamic calibration support. 
3778 */ 3779 dyn_calib_steps = STATIC_CALIB_STEPS; 3780 /* 3781 * Load global to allow dynamic selection of delay loop settings 3782 * based on calibration mode. 3783 */ 3784 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) 3785 skip_delay_mask = 0xff; 3786 else 3787 skip_delay_mask = 0x0; 3788 3789 pass = run_mem_calibrate(); 3790 debug_mem_calibrate(pass); 3791 return pass; 3792 } 3793
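/*
 * Usage note (sketch, not part of the original sequencer flow): the SPL-stage
 * SDRAM driver is expected to call sdram_calibration_full() once, after the
 * SDRAM controller and I/O configuration registers have been programmed. A
 * non-zero return value means calibration passed; on failure the failing
 * stage/substage/group can be read back from the register file (see
 * debug_mem_calibrate() above).
 */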