/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
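/*
 * Illustrative example (not from the original comments; assumes the
 * calibration entry point sets skip_delay_mask = 0xffff when not skipping
 * and 0 when CALIB_SKIP_DELAY_LOOPS is requested, as described above):
 *
 *	SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff)	=> 0xff	(not skipping)
 *	SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff)	=> 0x00	(skipping)
 *
 * so every delay-loop counter collapses to zero iterations in fast
 * simulation mode without any run-time branching.
 */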
struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);
	/* USER calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable, we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}

static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/* - Dual-Slot , Single-Rank
				 *   (1 chip-select per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM)
				 *   means 2 DIMM
				 * Since MEM_NUMBER_OF_RANKS is 2 they are
				 * both single rank with 2 CS each
				 * (special for RDIMM)
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * USER - Single-Slot , Dual-Rank DIMMs
				 *   (2 chip-selects per DIMM)
				 * USER Read: Turn off ODT on all ranks
				 * USER Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/* 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |                       |
			 *           |         ODT           |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |                       |
			 *           |         ODT           |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
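/*
 * Worked example (illustrative only): with 4 ranks in read/write mode,
 * rank = 2 selects odt_mask_0 = 0x1 and odt_mask_1 = 0x5 from the tables
 * above, so the value programmed into the RW manager is
 *
 *	cs_and_odt_mask = (0xFF & ~(1 << 2))	CS, active low	-> 0x0000FB
 *			| (0x1 << 8)		read ODT	-> 0x000100
 *			| (0x5 << 16)		write ODT	-> 0x050000
 *			= 0x0501FB
 *
 * i.e. chip-selects live in bits [7:0], the read ODT mask in [15:8] and
 * the write ODT mask in [23:16] of SET_CS_AND_ODT_MASK.
 */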
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}
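/*
 * Illustrative note (simple evaluation of the bit fields listed above):
 *
 *	value = (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27
 *
 * i.e. DQS bypass, DQ bypass, rfifo_mode = 1 and rfifo clock_select = 1,
 * with gating/ungating and OE/output-delay settings kept combined.
 */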
/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}
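/*
 * Illustrative example (widths assumed purely for the example): both
 * scc_mgr_set_oct_out1_delay() and scc_mgr_load_dqs_for_write_group()
 * fan a per-write-group setting out to every read group it contains.
 * With, say, RW_MGR_MEM_IF_READ_DQS_WIDTH = 8 and
 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 4, ratio = 2 and write group 3 maps
 * to read groups base = 3 * 2 = 6 and 7, so the OCT delay is written
 * into both DQS logic blocks.
 */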
/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/* optimization used to recover some slots in ddr3 inst_rom */
/* could be applied to other protocols if we wanted to */
static void set_jump_as_return(void)
{
	/*
	 * to save space, we replace return with jump to special shared
	 * RETURN instruction so we set the counter to large value so that
	 * we always jump
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}

	/*
	 * rom instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
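/*
 * Illustrative decompositions of afi_clocks into the counters programmed
 * above (example values only):
 *
 *	afi_clocks = 0x80  -> inner = 0x7f, outer/c_loop unused,
 *			      a single IDLE_LOOP1 loop of 0x80 passes.
 *	afi_clocks = 0x104 -> inner = 0xff, outer = (0x103 >> 8) = 1,
 *			      c_loop = 0, nested IDLE_LOOP2 run once.
 *
 * Remember the jnz test runs before the decrement, so a counter value of
 * N yields N + 1 passes, which is why (afi_clocks - 1) is programmed.
 */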
/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * USER Use Mirror-ed commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}
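/*
 * Illustrative note (mask value assumed for the example only):
 * RW_MGR_MEM_ADDRESS_MIRRORING is a per-rank bitmask, so with e.g. a
 * value of 0xA, ranks 1 and 3 take the *_MIRR MRS opcodes above while
 * ranks 0 and 2 use the regular ones.
 */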
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);

	/* indicate that memory is stable */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);

	/* bring up clock enable */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}
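/*
 * Sanity check of the counter choices commented above, using
 * number_of_cycles = ((2 + n) * a + 2) * b:
 *
 *	tINIT:  n = 0, a = 256, b = 106 -> (2 * 256 + 2) * 106 = 54484
 *		cycles, i.e. just over the ~54000 needed for 200us.
 *	tRESET: n = 2, a = 131, b = 256 -> (4 * 131 + 2) * 256 = 134656
 *		cycles, i.e. just over the ~134000 needed for 500us.
 */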
/*
 * At the end of calibration we have to program the user settings in, and
 * USER hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * USER need to wait tMOD (12CK or 15ns) time before issuing
	 * other commands, but we will have plenty of NIOS cycles before
	 * actual handoff so it's okay.
	 */
}

/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr));

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\
		   %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}

static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
		num_tries, bit_chk, 1);
}
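/*
 * Illustrative example (widths assumed for the example only): with
 * RW_MGR_MEM_DQ_PER_READ_DQS = 8 and 2 virtual groups per read DQS,
 * read_correct_mask_vg = 0xf and each virtual group contributes 4 bits.
 * In the guaranteed read above, tmp_bit_chk is shifted left by 4 and then
 * ORed with the per-group result (correct_mask_vg & ~base_rw_mgr), so
 * after both groups it holds one pass/fail bit per DQ; *bit_chk ANDs that
 * across ranks and the test only passes when it equals read_correct_mask
 * (0xff in this example).
 */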
/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
	uint32_t all_ranks)
{
	uint32_t r;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);

	debug("%s:%d\n", __func__, __LINE__);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix, used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\
			   (%u == %u) => %lu", __func__, __LINE__, group,
			   all_groups, *bit_chk, param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\
			   (%u != %lu) => %lu\n", __func__, __LINE__,
			   group, all_groups, *bit_chk, (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}

static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp, v);
}
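/*
 * Illustrative note (VFIFO_SIZE assumed for the example only): there is
 * no hardware "decrement VFIFO" command, so rw_mgr_decr_vfifo() simply
 * wraps the pointer around. With VFIFO_SIZE = 8, seven calls to
 * rw_mgr_incr_vfifo() leave the read pointer one position earlier,
 * modulo the FIFO depth.
 */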
static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return v;
	}
}

static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
				IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
				rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
			   ptap/dtap\n", __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}

static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
			     uint32_t *work_bgn, uint32_t *v, uint32_t *d,
			     uint32_t *p, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(*grp, v);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
		(*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			found_begin = 1;
			*work_bgn = tmp_delay;
			break;
		}
	}

	/* We have found a working dtap before the ptap found above */
	if (found_begin == 1)
		(*max_working_cnt)++;

	/*
	 * Restore VFIFO to old state before we decremented it
	 * (if needed).
	 */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}
static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
				uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
				uint32_t *work_end)
{
	uint32_t found_end = 0;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* fiddle with FIFO */
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	for (; *i < VFIFO_SIZE + 1; (*i)++) {
		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
			+= IO_DELAY_PER_OPA_TAP) {
			scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
				found_end = 1;
				break;
			} else {
				(*max_working_cnt)++;
			}
		}

		if (found_end)
			break;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* fiddle with FIFO */
			rw_mgr_incr_vfifo(*grp, v);
			*p = 0;
		}
	}

	if (*i >= VFIFO_SIZE + 1) {
		/* cannot see edge of failing read */
		debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\
			   failed\n", __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}
static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
				  uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				  uint32_t *p, uint32_t *work_mid,
				  uint32_t *work_end)
{
	int i;
	int tmp_delay = 0;

	*work_mid = (*work_bgn + *work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   *work_bgn, *work_end, *work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
		(*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	while (*work_mid > tmp_delay)
		*work_mid -= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);

	tmp_delay = 0;
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
		(*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	tmp_delay -= IO_DELAY_PER_OPA_TAP;
	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
		;
	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
	scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
			   *v);
		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(*grp, v);
	}

	if (i >= VFIFO_SIZE) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
			   failed\n", __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}
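/*
 * Worked example of the centring arithmetic above (tap delays assumed
 * purely for illustration): take IO_DELAY_PER_OPA_TAP = 400,
 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25, IO_DQS_EN_PHASE_MAX = 7,
 * work_bgn = 300 and work_end = 1500.
 *
 *	work_mid = (300 + 1500) / 2 = 900
 *	one VFIFO period = 8 * 400 = 3200, so work_mid stays 900
 *	ptap sweep stops at tmp_delay = 1200 (p = 3), backed off to 800
 *	dtap sweep then adds 4 * 25 = 100 to reach 900 (d = 4)
 *
 * so the centre is programmed as phase (p - 1) = 2 plus 4 delay taps,
 * and the VFIFO is nudged until a read at that setting passes.
 */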
/* find a good dqs enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t v, d, p, i;
	uint32_t max_working_cnt;
	uint32_t bit_chk;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_mid, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	v = find_vfifo_read(grp, &bit_chk);

	max_working_cnt = 0;

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	work_bgn = 0;
	if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
			       &p, &i, &max_working_cnt) == 0)
		return 0;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and
	 * we can follow the old procedure; otherwise, we've found the
	 * beginning, and we need to increment the dtaps until we find
	 * the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		     increment in dtaps * */

		sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				 &max_working_cnt);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		     phase, increment in ptaps * */
		if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
					 &i, &max_working_cnt, &work_end) == 0)
			return 0;

		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp, &v);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		     the if/else loop to share code */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
			   vfifo=%u ptap=%u\n", __func__, __LINE__,
			   v, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		     delay taps * */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \
			   ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
			   v, p, d, work_bgn);

		work_end = work_bgn;

		/* * The actual increment of dtaps is done outside of the
		     if/else loop to share code */

		/* Only here to counterbalance a subtract later on which is
		   not needed if this branch of the algorithm is taken */
		max_working_cnt++;
	}

	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
			   end-2: dtap=%u\n", __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							      PASS_ONE_BIT,
							      &bit_chk, 0)) {
			break;
		}
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \
		   ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
		   v, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
			   failed\n", __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
		   for tracking\n", __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp, &v);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
			   cycle/phase: v=%u p=%u\n", __func__, __LINE__,
			   v, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
			   phase only: v=%u p=%u", __func__, __LINE__,
			   v, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap),
	 * and then a failing read to mark the edge of the window again
	 */

	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
			   read d=%u\n", __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}

	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
			   read\n", __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
				   testing read d=%u\n", __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
			   calculate dtaps", __func__, __LINE__);
		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
		   - %u = %u", __func__, __LINE__, d,
		   initial_failing_dtap, dtaps_per_ptap);

	/* ******************************************** */
	/* * step 6: Find the centre of the window * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
		   vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
		   v, p - 1, d);
	return 1;
}

/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i;
	uint32_t p;
	uint32_t d;
	uint32_t r;

	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
				    (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
	/* we start at zero, so have one less dq to divide among */

	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
	      test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
				   vfifo_find_dqs_", __func__, __LINE__);
			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
				   write_group, read_group);
			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
		   en_phase_sweep_dq", __func__, __LINE__);
	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
		   chain to zero\n", write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}
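/*
 * Illustrative example (values assumed for the example only): with
 * IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8,
 * delay_step = 31 / 7 = 4, so the sweep above programs DQ input delays
 * 0, 4, 8, ... 28 across the bits of the group before the DQS enable
 * phase search runs, and resets them all to zero afterwards.
 */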
/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \
			   && %u", __func__, __LINE__, d,
			   sticky_bit_chk,
			   param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as the
					   left_edge */
					left_edge[i] = d;
				} else {
					/* If a left edge has not been seen yet,
					   then a future passing test will mark
					   this edge as the right edge */
					if (left_edge[i] ==
						IO_IO_IN_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
			   %d right_edge[%u]: %d\n", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
1903 */ 1904 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && ( 1905 right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { 1906 right_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1907 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \ 1908 right_edge[%u]: %d\n", __func__, __LINE__, 1909 i, right_edge[i]); 1910 } 1911 1912 /* 1913 * Reset sticky bit (except for bits where we have seen 1914 * both the left and right edge). 1915 */ 1916 sticky_bit_chk = sticky_bit_chk << 1; 1917 if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) && 1918 (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { 1919 sticky_bit_chk = sticky_bit_chk | 1; 1920 } 1921 1922 if (i == 0) 1923 break; 1924 } 1925 1926 /* Search for the right edge of the window for each bit */ 1927 for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) { 1928 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); 1929 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 1930 uint32_t delay = d + start_dqs_en; 1931 if (delay > IO_DQS_EN_DELAY_MAX) 1932 delay = IO_DQS_EN_DELAY_MAX; 1933 scc_mgr_set_dqs_en_delay(read_group, delay); 1934 } 1935 scc_mgr_load_dqs(read_group); 1936 1937 writel(0, &sdr_scc_mgr->update); 1938 1939 /* 1940 * Stop searching when the read test doesn't pass AND when 1941 * we've seen a passing read on every bit. 1942 */ 1943 if (use_read_test) { 1944 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn, 1945 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT, 1946 &bit_chk, 0, 0); 1947 } else { 1948 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1949 0, PASS_ONE_BIT, 1950 &bit_chk, 0); 1951 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * 1952 (read_group - (write_group * 1953 RW_MGR_MEM_IF_READ_DQS_WIDTH / 1954 RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); 1955 stop = (bit_chk == 0); 1956 } 1957 sticky_bit_chk = sticky_bit_chk | bit_chk; 1958 stop = stop && (sticky_bit_chk == param->read_correct_mask); 1959 1960 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \ 1961 %u && %u", __func__, __LINE__, d, 1962 sticky_bit_chk, param->read_correct_mask, stop); 1963 1964 if (stop == 1) { 1965 break; 1966 } else { 1967 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 1968 if (bit_chk & 1) { 1969 /* Remember a passing test as 1970 the right_edge */ 1971 right_edge[i] = d; 1972 } else { 1973 if (d != 0) { 1974 /* If a right edge has not been 1975 seen yet, then a future passing 1976 test will mark this edge as the 1977 left edge */ 1978 if (right_edge[i] == 1979 IO_IO_IN_DELAY_MAX + 1) { 1980 left_edge[i] = -(d + 1); 1981 } 1982 } else { 1983 /* d = 0 failed, but it passed 1984 when testing the left edge, 1985 so it must be marginal, 1986 set it to -1 */ 1987 if (right_edge[i] == 1988 IO_IO_IN_DELAY_MAX + 1 && 1989 left_edge[i] != 1990 IO_IO_IN_DELAY_MAX 1991 + 1) { 1992 right_edge[i] = -1; 1993 } 1994 /* If a right edge has not been 1995 seen yet, then a future passing 1996 test will mark this edge as the 1997 left edge */ 1998 else if (right_edge[i] == 1999 IO_IO_IN_DELAY_MAX + 2000 1) { 2001 left_edge[i] = -(d + 1); 2002 } 2003 } 2004 } 2005 2006 debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\ 2007 d=%u]: ", __func__, __LINE__, d); 2008 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ", 2009 (int)(bit_chk & 1), i, left_edge[i]); 2010 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2011 right_edge[i]); 2012 bit_chk = bit_chk >> 1; 2013 } 2014 } 2015 } 2016 2017 /* Check that all bits have a window */ 2018 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2019 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \ 2020 %d right_edge[%u]: %d", 
__func__, __LINE__, 2021 i, left_edge[i], i, right_edge[i]); 2022 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i] 2023 == IO_IO_IN_DELAY_MAX + 1)) { 2024 /* 2025 * Restore delay chain settings before letting the loop 2026 * in rw_mgr_mem_calibrate_vfifo to retry different 2027 * dqs/ck relationships. 2028 */ 2029 scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs); 2030 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2031 scc_mgr_set_dqs_en_delay(read_group, 2032 start_dqs_en); 2033 } 2034 scc_mgr_load_dqs(read_group); 2035 writel(0, &sdr_scc_mgr->update); 2036 2037 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \ 2038 find edge [%u]: %d %d", __func__, __LINE__, 2039 i, left_edge[i], right_edge[i]); 2040 if (use_read_test) { 2041 set_failing_group_stage(read_group * 2042 RW_MGR_MEM_DQ_PER_READ_DQS + i, 2043 CAL_STAGE_VFIFO, 2044 CAL_SUBSTAGE_VFIFO_CENTER); 2045 } else { 2046 set_failing_group_stage(read_group * 2047 RW_MGR_MEM_DQ_PER_READ_DQS + i, 2048 CAL_STAGE_VFIFO_AFTER_WRITES, 2049 CAL_SUBSTAGE_VFIFO_CENTER); 2050 } 2051 return 0; 2052 } 2053 } 2054 2055 /* Find middle of window for each DQ bit */ 2056 mid_min = left_edge[0] - right_edge[0]; 2057 min_index = 0; 2058 for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2059 mid = left_edge[i] - right_edge[i]; 2060 if (mid < mid_min) { 2061 mid_min = mid; 2062 min_index = i; 2063 } 2064 } 2065 2066 /* 2067 * -mid_min/2 represents the amount that we need to move DQS. 2068 * If mid_min is odd and positive we'll need to add one to 2069 * make sure the rounding in further calculations is correct 2070 * (always bias to the right), so just add 1 for all positive values. 2071 */ 2072 if (mid_min > 0) 2073 mid_min++; 2074 2075 mid_min = mid_min / 2; 2076 2077 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n", 2078 __func__, __LINE__, mid_min, min_index); 2079 2080 /* Determine the amount we can change DQS (which is -mid_min) */ 2081 orig_mid_min = mid_min; 2082 new_dqs = start_dqs - mid_min; 2083 if (new_dqs > IO_DQS_IN_DELAY_MAX) 2084 new_dqs = IO_DQS_IN_DELAY_MAX; 2085 else if (new_dqs < 0) 2086 new_dqs = 0; 2087 2088 mid_min = start_dqs - new_dqs; 2089 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n", 2090 mid_min, new_dqs); 2091 2092 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2093 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX) 2094 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX; 2095 else if (start_dqs_en - mid_min < 0) 2096 mid_min += start_dqs_en - mid_min; 2097 } 2098 new_dqs = start_dqs - mid_min; 2099 2100 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \ 2101 new_dqs=%d mid_min=%d\n", start_dqs, 2102 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? 
start_dqs_en : -1, 2103 new_dqs, mid_min); 2104 2105 /* Initialize data for export structures */ 2106 dqs_margin = IO_IO_IN_DELAY_MAX + 1; 2107 dq_margin = IO_IO_IN_DELAY_MAX + 1; 2108 2109 /* add delay to bring centre of all DQ windows to the same "level" */ 2110 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { 2111 /* Use values before divide by 2 to reduce round off error */ 2112 shift_dq = (left_edge[i] - right_edge[i] - 2113 (left_edge[min_index] - right_edge[min_index]))/2 + 2114 (orig_mid_min - mid_min); 2115 2116 debug_cond(DLEVEL == 2, "vfifo_center: before: \ 2117 shift_dq[%u]=%d\n", i, shift_dq); 2118 2119 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET; 2120 temp_dq_in_delay1 = readl(addr + (p << 2)); 2121 temp_dq_in_delay2 = readl(addr + (i << 2)); 2122 2123 if (shift_dq + (int32_t)temp_dq_in_delay1 > 2124 (int32_t)IO_IO_IN_DELAY_MAX) { 2125 shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2; 2126 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) { 2127 shift_dq = -(int32_t)temp_dq_in_delay1; 2128 } 2129 debug_cond(DLEVEL == 2, "vfifo_center: after: \ 2130 shift_dq[%u]=%d\n", i, shift_dq); 2131 final_dq[i] = temp_dq_in_delay1 + shift_dq; 2132 scc_mgr_set_dq_in_delay(p, final_dq[i]); 2133 scc_mgr_load_dq(p); 2134 2135 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i, 2136 left_edge[i] - shift_dq + (-mid_min), 2137 right_edge[i] + shift_dq - (-mid_min)); 2138 /* To determine values for export structures */ 2139 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) 2140 dq_margin = left_edge[i] - shift_dq + (-mid_min); 2141 2142 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) 2143 dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2144 } 2145 2146 final_dqs = new_dqs; 2147 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) 2148 final_dqs_en = start_dqs_en - mid_min; 2149 2150 /* Move DQS-en */ 2151 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2152 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en); 2153 scc_mgr_load_dqs(read_group); 2154 } 2155 2156 /* Move DQS */ 2157 scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs); 2158 scc_mgr_load_dqs(read_group); 2159 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \ 2160 dqs_margin=%d", __func__, __LINE__, 2161 dq_margin, dqs_margin); 2162 2163 /* 2164 * Do not remove this line as it makes sure all of our decisions 2165 * have been applied. Apply the update bit. 2166 */ 2167 writel(0, &sdr_scc_mgr->update); 2168 2169 return (dq_margin >= 0) && (dqs_margin >= 0); 2170 } 2171 2172 /* 2173 * calibrate the read valid prediction FIFO. 2174 * 2175 * - read valid prediction will consist of finding a good DQS enable phase, 2176 * DQS enable delay, DQS input phase, and DQS input delay. 2177 * - we also do a per-bit deskew on the DQ lines. 
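 * - the routine sweeps the DQDQS output phase (p) and extra output delay
 *   taps (d), reloading the read patterns for each setting, until the
 *   guaranteed read, DQS-enable and per-bit centering steps all pass.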
2178 */ 2179 static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group, 2180 uint32_t test_bgn) 2181 { 2182 uint32_t p, d, rank_bgn, sr; 2183 uint32_t dtaps_per_ptap; 2184 uint32_t tmp_delay; 2185 uint32_t bit_chk; 2186 uint32_t grp_calibrated; 2187 uint32_t write_group, write_test_bgn; 2188 uint32_t failed_substage; 2189 2190 debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn); 2191 2192 /* update info for sims */ 2193 reg_file_set_stage(CAL_STAGE_VFIFO); 2194 2195 write_group = read_group; 2196 write_test_bgn = test_bgn; 2197 2198 /* USER Determine number of delay taps for each phase tap */ 2199 dtaps_per_ptap = 0; 2200 tmp_delay = 0; 2201 while (tmp_delay < IO_DELAY_PER_OPA_TAP) { 2202 dtaps_per_ptap++; 2203 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 2204 } 2205 dtaps_per_ptap--; 2206 tmp_delay = 0; 2207 2208 /* update info for sims */ 2209 reg_file_set_group(read_group); 2210 2211 grp_calibrated = 0; 2212 2213 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); 2214 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; 2215 2216 for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) { 2217 /* 2218 * In RLDRAMX we may be messing the delay of pins in 2219 * the same write group but outside of the current read 2220 * the group, but that's ok because we haven't 2221 * calibrated output side yet. 2222 */ 2223 if (d > 0) { 2224 scc_mgr_apply_group_all_out_delay_add_all_ranks( 2225 write_group, d); 2226 } 2227 2228 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; 2229 p++) { 2230 /* set a particular dqdqs phase */ 2231 scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p); 2232 2233 debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \ 2234 p=%u d=%u\n", __func__, __LINE__, 2235 read_group, p, d); 2236 2237 /* 2238 * Load up the patterns used by read calibration 2239 * using current DQDQS phase. 2240 */ 2241 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2242 if (!(gbl->phy_debug_mode_flags & 2243 PHY_DEBUG_DISABLE_GUARANTEED_READ)) { 2244 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks 2245 (read_group, 1, &bit_chk)) { 2246 debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:", 2247 __func__, __LINE__); 2248 debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n", 2249 read_group, p, d); 2250 break; 2251 } 2252 } 2253 2254 /* case:56390 */ 2255 grp_calibrated = 1; 2256 if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay 2257 (write_group, read_group, test_bgn)) { 2258 /* 2259 * USER Read per-bit deskew can be done on a 2260 * per shadow register basis. 2261 */ 2262 for (rank_bgn = 0, sr = 0; 2263 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2264 rank_bgn += NUM_RANKS_PER_SHADOW_REG, 2265 ++sr) { 2266 /* 2267 * Determine if this set of ranks 2268 * should be skipped entirely. 2269 */ 2270 if (!param->skip_shadow_regs[sr]) { 2271 /* 2272 * If doing read after write 2273 * calibration, do not update 2274 * FOM, now - do it then. 
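 * The final read-after-write pass,
 * rw_mgr_mem_calibrate_vfifo_end(), calls the
 * centering routine again with update_fom set.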
2275 */ 2276 if (!rw_mgr_mem_calibrate_vfifo_center 2277 (rank_bgn, write_group, 2278 read_group, test_bgn, 1, 0)) { 2279 grp_calibrated = 0; 2280 failed_substage = 2281 CAL_SUBSTAGE_VFIFO_CENTER; 2282 } 2283 } 2284 } 2285 } else { 2286 grp_calibrated = 0; 2287 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; 2288 } 2289 } 2290 } 2291 2292 if (grp_calibrated == 0) { 2293 set_failing_group_stage(write_group, CAL_STAGE_VFIFO, 2294 failed_substage); 2295 return 0; 2296 } 2297 2298 /* 2299 * Reset the delay chains back to zero if they have moved > 1 2300 * (check for > 1 because loop will increase d even when pass in 2301 * first case). 2302 */ 2303 if (d > 2) 2304 scc_mgr_zero_group(write_group, 1); 2305 2306 return 1; 2307 } 2308 2309 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */ 2310 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group, 2311 uint32_t test_bgn) 2312 { 2313 uint32_t rank_bgn, sr; 2314 uint32_t grp_calibrated; 2315 uint32_t write_group; 2316 2317 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn); 2318 2319 /* update info for sims */ 2320 2321 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); 2322 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 2323 2324 write_group = read_group; 2325 2326 /* update info for sims */ 2327 reg_file_set_group(read_group); 2328 2329 grp_calibrated = 1; 2330 /* Read per-bit deskew can be done on a per shadow register basis */ 2331 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2332 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { 2333 /* Determine if this set of ranks should be skipped entirely */ 2334 if (!param->skip_shadow_regs[sr]) { 2335 /* This is the last calibration round, update FOM here */ 2336 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn, 2337 write_group, 2338 read_group, 2339 test_bgn, 0, 2340 1)) { 2341 grp_calibrated = 0; 2342 } 2343 } 2344 } 2345 2346 2347 if (grp_calibrated == 0) { 2348 set_failing_group_stage(write_group, 2349 CAL_STAGE_VFIFO_AFTER_WRITES, 2350 CAL_SUBSTAGE_VFIFO_CENTER); 2351 return 0; 2352 } 2353 2354 return 1; 2355 } 2356 2357 /* Calibrate LFIFO to find smallest read latency */ 2358 static uint32_t rw_mgr_mem_calibrate_lfifo(void) 2359 { 2360 uint32_t found_one; 2361 uint32_t bit_chk; 2362 2363 debug("%s:%d\n", __func__, __LINE__); 2364 2365 /* update info for sims */ 2366 reg_file_set_stage(CAL_STAGE_LFIFO); 2367 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); 2368 2369 /* Load up the patterns used by read calibration for all ranks */ 2370 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2371 found_one = 0; 2372 2373 do { 2374 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2375 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u", 2376 __func__, __LINE__, gbl->curr_read_lat); 2377 2378 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, 2379 NUM_READ_TESTS, 2380 PASS_ALL_BITS, 2381 &bit_chk, 1)) { 2382 break; 2383 } 2384 2385 found_one = 1; 2386 /* reduce read latency and see if things are working */ 2387 /* correctly */ 2388 gbl->curr_read_lat--; 2389 } while (gbl->curr_read_lat > 0); 2390 2391 /* reset the fifos to get pointers to known state */ 2392 2393 writel(0, &phy_mgr_cmd->fifo_reset); 2394 2395 if (found_one) { 2396 /* add a fudge factor to the read latency that was determined */ 2397 gbl->curr_read_lat += 2; 2398 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2399 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \ 2400 read_lat=%u\n", __func__, __LINE__, 2401 gbl->curr_read_lat); 2402 return 1; 2403 } else { 2404 
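/*
 * Not even the initial (largest) read latency produced a passing read
 * test, so record the LFIFO stage as the failing stage.
 */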
set_failing_group_stage(0xff, CAL_STAGE_LFIFO, 2405 CAL_SUBSTAGE_READ_LATENCY); 2406 2407 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \ 2408 read_lat=%u\n", __func__, __LINE__, 2409 gbl->curr_read_lat); 2410 return 0; 2411 } 2412 } 2413 2414 /* 2415 * issue write test command. 2416 * two variants are provided. one that just tests a write pattern and 2417 * another that tests datamask functionality. 2418 */ 2419 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group, 2420 uint32_t test_dm) 2421 { 2422 uint32_t mcc_instruction; 2423 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) && 2424 ENABLE_SUPER_QUICK_CALIBRATION); 2425 uint32_t rw_wl_nop_cycles; 2426 uint32_t addr; 2427 2428 /* 2429 * Set counter and jump addresses for the right 2430 * number of NOP cycles. 2431 * The number of supported NOP cycles can range from -1 to infinity 2432 * Three different cases are handled: 2433 * 2434 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping 2435 * mechanism will be used to insert the right number of NOPs 2436 * 2437 * 2. For a number of NOP cycles equals to 0, the micro-instruction 2438 * issuing the write command will jump straight to the 2439 * micro-instruction that turns on DQS (for DDRx), or outputs write 2440 * data (for RLD), skipping 2441 * the NOP micro-instruction all together 2442 * 2443 * 3. A number of NOP cycles equal to -1 indicates that DQS must be 2444 * turned on in the same micro-instruction that issues the write 2445 * command. Then we need 2446 * to directly jump to the micro-instruction that sends out the data 2447 * 2448 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters 2449 * (2 and 3). One jump-counter (0) is used to perform multiple 2450 * write-read operations. 2451 * one counter left to issue this command in "multiple-group" mode 2452 */ 2453 2454 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; 2455 2456 if (rw_wl_nop_cycles == -1) { 2457 /* 2458 * CNTR 2 - We want to execute the special write operation that 2459 * turns on DQS right away and then skip directly to the 2460 * instruction that sends out the data. We set the counter to a 2461 * large number so that the jump is always taken. 2462 */ 2463 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2464 2465 /* CNTR 3 - Not used */ 2466 if (test_dm) { 2467 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1; 2468 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA, 2469 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2470 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2471 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2472 } else { 2473 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1; 2474 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA, 2475 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2476 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2477 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2478 } 2479 } else if (rw_wl_nop_cycles == 0) { 2480 /* 2481 * CNTR 2 - We want to skip the NOP operation and go straight 2482 * to the DQS enable instruction. We set the counter to a large 2483 * number so that the jump is always taken. 
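 * (As in the rw_wl_nop_cycles == -1 case above, 0xFF effectively means
 * "always take the jump".)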
2484 */ 2485 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2486 2487 /* CNTR 3 - Not used */ 2488 if (test_dm) { 2489 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2490 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS, 2491 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2492 } else { 2493 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2494 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS, 2495 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2496 } 2497 } else { 2498 /* 2499 * CNTR 2 - In this case we want to execute the next instruction 2500 * and NOT take the jump. So we set the counter to 0. The jump 2501 * address doesn't count. 2502 */ 2503 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2); 2504 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2505 2506 /* 2507 * CNTR 3 - Set the nop counter to the number of cycles we 2508 * need to loop for, minus 1. 2509 */ 2510 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); 2511 if (test_dm) { 2512 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2513 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2514 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2515 } else { 2516 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2517 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2518 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2519 } 2520 } 2521 2522 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 2523 RW_MGR_RESET_READ_DATAPATH_OFFSET); 2524 2525 if (quick_write_mode) 2526 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0); 2527 else 2528 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0); 2529 2530 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 2531 2532 /* 2533 * CNTR 1 - This is used to ensure enough time elapses 2534 * for read data to come back. 2535 */ 2536 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); 2537 2538 if (test_dm) { 2539 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT, 2540 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2541 } else { 2542 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT, 2543 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2544 } 2545 2546 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 2547 writel(mcc_instruction, addr + (group << 2)); 2548 } 2549 2550 /* Test writes, can check for a single bit pass or multiple bit pass */ 2551 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, 2552 uint32_t write_group, uint32_t use_dm, uint32_t all_correct, 2553 uint32_t *bit_chk, uint32_t all_ranks) 2554 { 2555 uint32_t r; 2556 uint32_t correct_mask_vg; 2557 uint32_t tmp_bit_chk; 2558 uint32_t vg; 2559 uint32_t rank_end = all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : 2560 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 2561 uint32_t addr_rw_mgr; 2562 uint32_t base_rw_mgr; 2563 2564 *bit_chk = param->write_correct_mask; 2565 correct_mask_vg = param->write_correct_mask_vg; 2566 2567 for (r = rank_bgn; r < rank_end; r++) { 2568 if (param->skip_ranks[r]) { 2569 /* request to skip the rank */ 2570 continue; 2571 } 2572 2573 /* set rank */ 2574 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 2575 2576 tmp_bit_chk = 0; 2577 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS; 2578 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) { 2579 /* reset the fifos to get pointers to known state */ 2580 writel(0, &phy_mgr_cmd->fifo_reset); 2581 2582 tmp_bit_chk = tmp_bit_chk << 2583 (RW_MGR_MEM_DQ_PER_WRITE_DQS / 2584 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 2585 rw_mgr_mem_calibrate_write_test_issue(write_group * 2586 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg, 2587 use_dm); 2588 2589 base_rw_mgr = readl(addr_rw_mgr); 2590 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr)); 2591 if (vg == 0) 2592 break; 2593 } 2594 *bit_chk &= tmp_bit_chk; 2595 } 2596 2597 if (all_correct) { 2598 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2599 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \ 2600 %u => %lu", write_group, use_dm, 2601 *bit_chk, param->write_correct_mask, 2602 (long unsigned int)(*bit_chk == 2603 param->write_correct_mask)); 2604 return *bit_chk == param->write_correct_mask; 2605 } else { 2606 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2607 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ", 2608 write_group, use_dm, *bit_chk); 2609 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0, 2610 (long unsigned int)(*bit_chk != 0)); 2611 return *bit_chk != 0x00; 2612 } 2613 } 2614 2615 /* 2616 * center all windows. do per-bit-deskew to possibly increase size of 2617 * certain windows. 2618 */ 2619 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, 2620 uint32_t write_group, uint32_t test_bgn) 2621 { 2622 uint32_t i, p, min_index; 2623 int32_t d; 2624 /* 2625 * Store these as signed since there are comparisons with 2626 * signed numbers. 2627 */ 2628 uint32_t bit_chk; 2629 uint32_t sticky_bit_chk; 2630 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2631 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2632 int32_t mid; 2633 int32_t mid_min, orig_mid_min; 2634 int32_t new_dqs, start_dqs, shift_dq; 2635 int32_t dq_margin, dqs_margin, dm_margin; 2636 uint32_t stop; 2637 uint32_t temp_dq_out1_delay; 2638 uint32_t addr; 2639 2640 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn); 2641 2642 dm_margin = 0; 2643 2644 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2645 start_dqs = readl(addr + 2646 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2)); 2647 2648 /* per-bit deskew */ 2649 2650 /* 2651 * set the left and right edge of each bit to an illegal value 2652 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value. 2653 */ 2654 sticky_bit_chk = 0; 2655 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2656 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2657 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2658 } 2659 2660 /* Search for the left edge of the window for each bit */ 2661 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) { 2662 scc_mgr_apply_group_dq_out1_delay(write_group, d); 2663 2664 writel(0, &sdr_scc_mgr->update); 2665 2666 /* 2667 * Stop searching when the read test doesn't pass AND when 2668 * we've seen a passing read on every bit. 
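 * Note: in this write-centering routine the per-bit pass mask is
 * produced by rw_mgr_mem_calibrate_write_test(), not by a read test.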
2669 */ 2670 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2671 0, PASS_ONE_BIT, &bit_chk, 0); 2672 sticky_bit_chk = sticky_bit_chk | bit_chk; 2673 stop = stop && (sticky_bit_chk == param->write_correct_mask); 2674 debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \ 2675 == %u && %u [bit_chk= %u ]\n", 2676 d, sticky_bit_chk, param->write_correct_mask, 2677 stop, bit_chk); 2678 2679 if (stop == 1) { 2680 break; 2681 } else { 2682 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2683 if (bit_chk & 1) { 2684 /* 2685 * Remember a passing test as the 2686 * left_edge. 2687 */ 2688 left_edge[i] = d; 2689 } else { 2690 /* 2691 * If a left edge has not been seen 2692 * yet, then a future passing test will 2693 * mark this edge as the right edge. 2694 */ 2695 if (left_edge[i] == 2696 IO_IO_OUT1_DELAY_MAX + 1) { 2697 right_edge[i] = -(d + 1); 2698 } 2699 } 2700 debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d); 2701 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d", 2702 (int)(bit_chk & 1), i, left_edge[i]); 2703 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2704 right_edge[i]); 2705 bit_chk = bit_chk >> 1; 2706 } 2707 } 2708 } 2709 2710 /* Reset DQ delay chains to 0 */ 2711 scc_mgr_apply_group_dq_out1_delay(0); 2712 sticky_bit_chk = 0; 2713 for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) { 2714 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \ 2715 %d right_edge[%u]: %d\n", __func__, __LINE__, 2716 i, left_edge[i], i, right_edge[i]); 2717 2718 /* 2719 * Check for cases where we haven't found the left edge, 2720 * which makes our assignment of the the right edge invalid. 2721 * Reset it to the illegal value. 2722 */ 2723 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) && 2724 (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { 2725 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2726 debug_cond(DLEVEL == 2, "%s:%d write_center: reset \ 2727 right_edge[%u]: %d\n", __func__, __LINE__, 2728 i, right_edge[i]); 2729 } 2730 2731 /* 2732 * Reset sticky bit (except for bits where we have 2733 * seen the left edge). 2734 */ 2735 sticky_bit_chk = sticky_bit_chk << 1; 2736 if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) 2737 sticky_bit_chk = sticky_bit_chk | 1; 2738 2739 if (i == 0) 2740 break; 2741 } 2742 2743 /* Search for the right edge of the window for each bit */ 2744 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) { 2745 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 2746 d + start_dqs); 2747 2748 writel(0, &sdr_scc_mgr->update); 2749 2750 /* 2751 * Stop searching when the read test doesn't pass AND when 2752 * we've seen a passing read on every bit. 
2753 */ 2754 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2755 0, PASS_ONE_BIT, &bit_chk, 0); 2756 2757 sticky_bit_chk = sticky_bit_chk | bit_chk; 2758 stop = stop && (sticky_bit_chk == param->write_correct_mask); 2759 2760 debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \ 2761 %u && %u\n", d, sticky_bit_chk, 2762 param->write_correct_mask, stop); 2763 2764 if (stop == 1) { 2765 if (d == 0) { 2766 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; 2767 i++) { 2768 /* d = 0 failed, but it passed when 2769 testing the left edge, so it must be 2770 marginal, set it to -1 */ 2771 if (right_edge[i] == 2772 IO_IO_OUT1_DELAY_MAX + 1 && 2773 left_edge[i] != 2774 IO_IO_OUT1_DELAY_MAX + 1) { 2775 right_edge[i] = -1; 2776 } 2777 } 2778 } 2779 break; 2780 } else { 2781 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2782 if (bit_chk & 1) { 2783 /* 2784 * Remember a passing test as 2785 * the right_edge. 2786 */ 2787 right_edge[i] = d; 2788 } else { 2789 if (d != 0) { 2790 /* 2791 * If a right edge has not 2792 * been seen yet, then a future 2793 * passing test will mark this 2794 * edge as the left edge. 2795 */ 2796 if (right_edge[i] == 2797 IO_IO_OUT1_DELAY_MAX + 1) 2798 left_edge[i] = -(d + 1); 2799 } else { 2800 /* 2801 * d = 0 failed, but it passed 2802 * when testing the left edge, 2803 * so it must be marginal, set 2804 * it to -1. 2805 */ 2806 if (right_edge[i] == 2807 IO_IO_OUT1_DELAY_MAX + 1 && 2808 left_edge[i] != 2809 IO_IO_OUT1_DELAY_MAX + 1) 2810 right_edge[i] = -1; 2811 /* 2812 * If a right edge has not been 2813 * seen yet, then a future 2814 * passing test will mark this 2815 * edge as the left edge. 2816 */ 2817 else if (right_edge[i] == 2818 IO_IO_OUT1_DELAY_MAX + 2819 1) 2820 left_edge[i] = -(d + 1); 2821 } 2822 } 2823 debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d); 2824 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d", 2825 (int)(bit_chk & 1), i, left_edge[i]); 2826 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2827 right_edge[i]); 2828 bit_chk = bit_chk >> 1; 2829 } 2830 } 2831 } 2832 2833 /* Check that all bits have a window */ 2834 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2835 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \ 2836 %d right_edge[%u]: %d", __func__, __LINE__, 2837 i, left_edge[i], i, right_edge[i]); 2838 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) || 2839 (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) { 2840 set_failing_group_stage(test_bgn + i, 2841 CAL_STAGE_WRITES, 2842 CAL_SUBSTAGE_WRITES_CENTER); 2843 return 0; 2844 } 2845 } 2846 2847 /* Find middle of window for each DQ bit */ 2848 mid_min = left_edge[0] - right_edge[0]; 2849 min_index = 0; 2850 for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2851 mid = left_edge[i] - right_edge[i]; 2852 if (mid < mid_min) { 2853 mid_min = mid; 2854 min_index = i; 2855 } 2856 } 2857 2858 /* 2859 * -mid_min/2 represents the amount that we need to move DQS. 2860 * If mid_min is odd and positive we'll need to add one to 2861 * make sure the rounding in further calculations is correct 2862 * (always bias to the right), so just add 1 for all positive values. 
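 *
 * For example (hypothetical window): left_edge = 7 and right_edge = 4 give
 * mid_min = 3; adding 1 and halving yields a shift of 2 rather than 1,
 * biasing the centre to the right.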
2863 */ 2864 if (mid_min > 0) 2865 mid_min++; 2866 mid_min = mid_min / 2; 2867 debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__, 2868 __LINE__, mid_min); 2869 2870 /* Determine the amount we can change DQS (which is -mid_min) */ 2871 orig_mid_min = mid_min; 2872 new_dqs = start_dqs; 2873 mid_min = 0; 2874 debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \ 2875 mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min); 2876 /* Initialize data for export structures */ 2877 dqs_margin = IO_IO_OUT1_DELAY_MAX + 1; 2878 dq_margin = IO_IO_OUT1_DELAY_MAX + 1; 2879 2880 /* add delay to bring centre of all DQ windows to the same "level" */ 2881 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { 2882 /* Use values before divide by 2 to reduce round off error */ 2883 shift_dq = (left_edge[i] - right_edge[i] - 2884 (left_edge[min_index] - right_edge[min_index]))/2 + 2885 (orig_mid_min - mid_min); 2886 2887 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \ 2888 [%u]=%d\n", __func__, __LINE__, i, shift_dq); 2889 2890 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2891 temp_dq_out1_delay = readl(addr + (i << 2)); 2892 if (shift_dq + (int32_t)temp_dq_out1_delay > 2893 (int32_t)IO_IO_OUT1_DELAY_MAX) { 2894 shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay; 2895 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) { 2896 shift_dq = -(int32_t)temp_dq_out1_delay; 2897 } 2898 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n", 2899 i, shift_dq); 2900 scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq); 2901 scc_mgr_load_dq(i); 2902 2903 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i, 2904 left_edge[i] - shift_dq + (-mid_min), 2905 right_edge[i] + shift_dq - (-mid_min)); 2906 /* To determine values for export structures */ 2907 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) 2908 dq_margin = left_edge[i] - shift_dq + (-mid_min); 2909 2910 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) 2911 dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2912 } 2913 2914 /* Move DQS */ 2915 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 2916 writel(0, &sdr_scc_mgr->update); 2917 2918 /* Centre DM */ 2919 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__); 2920 2921 /* 2922 * set the left and right edge of each bit to an illegal value, 2923 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value, 2924 */ 2925 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; 2926 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; 2927 int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 2928 int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1; 2929 int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1; 2930 int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1; 2931 int32_t win_best = 0; 2932 2933 /* Search for the/part of the window with DM shift */ 2934 for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) { 2935 scc_mgr_apply_group_dm_out1_delay(d); 2936 writel(0, &sdr_scc_mgr->update); 2937 2938 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, 2939 PASS_ALL_BITS, &bit_chk, 2940 0)) { 2941 /* USE Set current end of the window */ 2942 end_curr = -d; 2943 /* 2944 * If a starting edge of our window has not been seen 2945 * this is our current start of the DM window. 2946 */ 2947 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) 2948 bgn_curr = -d; 2949 2950 /* 2951 * If current window is bigger than best seen. 2952 * Set best seen to be current window. 
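 * (The window size is end_curr - bgn_curr + 1 delay taps. This DM sweep
 * records positions as -d so that the following DQS-shift sweep can extend
 * the same window at positive positions.)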
2953 */
2954 if ((end_curr-bgn_curr+1) > win_best) {
2955 win_best = end_curr-bgn_curr+1;
2956 bgn_best = bgn_curr;
2957 end_best = end_curr;
2958 }
2959 } else {
2960 /* We just saw a failing test. Reset temp edge */
2961 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2962 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2963 }
2964 }
2965
2966
2967 /* Reset DM delay chains to 0 */
2968 scc_mgr_apply_group_dm_out1_delay(0);
2969
2970 /*
2971 * Check to see if the current window nudges up against 0 delay.
2972 * If so we need to continue the search by shifting DQS, otherwise the
2973 * DQS search begins as a new search. */
2974 if (end_curr != 0) {
2975 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2976 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2977 }
2978
2979 /* Search for the (part of the) window reachable with DQS shifts */
2980 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
2981 /*
2982 * Note: This only shifts DQS, so we may be limiting ourselves to
2983 * the width of DQ unnecessarily.
2984 */
2985 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2986 d + new_dqs);
2987
2988 writel(0, &sdr_scc_mgr->update);
2989 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2990 PASS_ALL_BITS, &bit_chk,
2991 0)) {
2992 /* USER Set current end of the window */
2993 end_curr = d;
2994 /*
2995 * If a beginning edge of our window has not been seen,
2996 * this is the current beginning of the DM window.
2997 */
2998 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2999 bgn_curr = d;
3000
3001 /*
3002 * If the current window is bigger than the best seen
3003 * so far, set best seen to the current window.
3004 */
3005 if ((end_curr-bgn_curr+1) > win_best) {
3006 win_best = end_curr-bgn_curr+1;
3007 bgn_best = bgn_curr;
3008 end_best = end_curr;
3009 }
3010 } else {
3011 /* We just saw a failing test. Reset temp edge */
3012 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3013 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3014
3015 /* Early exit optimization: if the remaining delay
3016 chain space is less than the largest window already
3017 seen, we can exit */
3018 if ((win_best-1) >
3019 (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
3020 break;
3021 }
3022 }
3023 }
3024
3025 /* assign left and right edge for cal and reporting */
3026 left_edge[0] = -1*bgn_best;
3027 right_edge[0] = end_best;
3028
3029 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
3030 __LINE__, left_edge[0], right_edge[0]);
3031
3032 /* Move DQS (back to orig) */
3033 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3034
3035 /* Move DM */
3036
3037 /* Find middle of window for the DM bit */
3038 mid = (left_edge[0] - right_edge[0]) / 2;
3039
3040 /* only move right, since we are not moving DQS/DQ */
3041 if (mid < 0)
3042 mid = 0;
3043
3044 /* dm_margin should fail if we never find a window */
3045 if (win_best == 0)
3046 dm_margin = -1;
3047 else
3048 dm_margin = left_edge[0] - mid;
3049
3050 scc_mgr_apply_group_dm_out1_delay(mid);
3051 writel(0, &sdr_scc_mgr->update);
3052
3053 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \ 3054 dm_margin=%d\n", __func__, __LINE__, left_edge[0],
3055 right_edge[0], mid, dm_margin);
3056 /* Export values */
3057 gbl->fom_out += dq_margin + dqs_margin;
3058
3059 debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \ 3060 dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
3061 dq_margin, dqs_margin, dm_margin);
3062
3063 /*
3064 * Do not remove this line as it makes sure all of our
3065 * decisions have been applied.
3066 */ 3067 writel(0, &sdr_scc_mgr->update); 3068 return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0); 3069 } 3070 3071 /* calibrate the write operations */ 3072 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g, 3073 uint32_t test_bgn) 3074 { 3075 /* update info for sims */ 3076 debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn); 3077 3078 reg_file_set_stage(CAL_STAGE_WRITES); 3079 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); 3080 3081 reg_file_set_group(g); 3082 3083 if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) { 3084 set_failing_group_stage(g, CAL_STAGE_WRITES, 3085 CAL_SUBSTAGE_WRITES_CENTER); 3086 return 0; 3087 } 3088 3089 return 1; 3090 } 3091 3092 /* precharge all banks and activate row 0 in bank "000..." and bank "111..." */ 3093 static void mem_precharge_and_activate(void) 3094 { 3095 uint32_t r; 3096 3097 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { 3098 if (param->skip_ranks[r]) { 3099 /* request to skip the rank */ 3100 continue; 3101 } 3102 3103 /* set rank */ 3104 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); 3105 3106 /* precharge all banks ... */ 3107 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3108 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3109 3110 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0); 3111 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1, 3112 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 3113 3114 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1); 3115 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2, 3116 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 3117 3118 /* activate rows */ 3119 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3120 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3121 } 3122 } 3123 3124 /* Configure various memory related parameters. */ 3125 static void mem_config(void) 3126 { 3127 uint32_t rlat, wlat; 3128 uint32_t rw_wl_nop_cycles; 3129 uint32_t max_latency; 3130 3131 debug("%s:%d\n", __func__, __LINE__); 3132 /* read in write and read latency */ 3133 wlat = readl(&data_mgr->t_wl_add); 3134 wlat += readl(&data_mgr->mem_t_add); 3135 3136 /* WL for hard phy does not include additive latency */ 3137 3138 /* 3139 * add addtional write latency to offset the address/command extra 3140 * clock cycle. We change the AC mux setting causing AC to be delayed 3141 * by one mem clock cycle. Only do this for DDR3 3142 */ 3143 wlat = wlat + 1; 3144 3145 rlat = readl(&data_mgr->t_rl_add); 3146 3147 rw_wl_nop_cycles = wlat - 2; 3148 gbl->rw_wl_nop_cycles = rw_wl_nop_cycles; 3149 3150 /* 3151 * For AV/CV, lfifo is hardened and always runs at full rate so 3152 * max latency in AFI clocks, used here, is correspondingly smaller. 
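 * (Hence the divide-by-one in the expression below; presumably a
 * half-rate LFIFO would halve the usable latency count here.)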
3153 */ 3154 max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1; 3155 /* configure for a burst length of 8 */ 3156 3157 /* write latency */ 3158 /* Adjust Write Latency for Hard PHY */ 3159 wlat = wlat + 1; 3160 3161 /* set a pretty high read latency initially */ 3162 gbl->curr_read_lat = rlat + 16; 3163 3164 if (gbl->curr_read_lat > max_latency) 3165 gbl->curr_read_lat = max_latency; 3166 3167 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3168 3169 /* advertise write latency */ 3170 gbl->curr_write_lat = wlat; 3171 writel(wlat - 2, &phy_mgr_cfg->afi_wlat); 3172 3173 /* initialize bit slips */ 3174 mem_precharge_and_activate(); 3175 } 3176 3177 /* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */ 3178 static void mem_skip_calibrate(void) 3179 { 3180 uint32_t vfifo_offset; 3181 uint32_t i, j, r; 3182 3183 debug("%s:%d\n", __func__, __LINE__); 3184 /* Need to update every shadow register set used by the interface */ 3185 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 3186 r += NUM_RANKS_PER_SHADOW_REG) { 3187 /* 3188 * Set output phase alignment settings appropriate for 3189 * skip calibration. 3190 */ 3191 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3192 scc_mgr_set_dqs_en_phase(i, 0); 3193 #if IO_DLL_CHAIN_LENGTH == 6 3194 scc_mgr_set_dqdqs_output_phase(i, 6); 3195 #else 3196 scc_mgr_set_dqdqs_output_phase(i, 7); 3197 #endif 3198 /* 3199 * Case:33398 3200 * 3201 * Write data arrives to the I/O two cycles before write 3202 * latency is reached (720 deg). 3203 * -> due to bit-slip in a/c bus 3204 * -> to allow board skew where dqs is longer than ck 3205 * -> how often can this happen!? 3206 * -> can claim back some ptaps for high freq 3207 * support if we can relax this, but i digress... 3208 * 3209 * The write_clk leads mem_ck by 90 deg 3210 * The minimum ptap of the OPA is 180 deg 3211 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay 3212 * The write_clk is always delayed by 2 ptaps 3213 * 3214 * Hence, to make DQS aligned to CK, we need to delay 3215 * DQS by: 3216 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH)) 3217 * 3218 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH) 3219 * gives us the number of ptaps, which simplies to: 3220 * 3221 * (1.25 * IO_DLL_CHAIN_LENGTH - 2) 3222 */ 3223 scc_mgr_set_dqdqs_output_phase(i, (1.25 * 3224 IO_DLL_CHAIN_LENGTH - 2)); 3225 } 3226 writel(0xff, &sdr_scc_mgr->dqs_ena); 3227 writel(0xff, &sdr_scc_mgr->dqs_io_ena); 3228 3229 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { 3230 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3231 SCC_MGR_GROUP_COUNTER_OFFSET); 3232 } 3233 writel(0xff, &sdr_scc_mgr->dq_ena); 3234 writel(0xff, &sdr_scc_mgr->dm_ena); 3235 writel(0, &sdr_scc_mgr->update); 3236 } 3237 3238 /* Compensate for simulation model behaviour */ 3239 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3240 scc_mgr_set_dqs_bus_in_delay(i, 10); 3241 scc_mgr_load_dqs(i); 3242 } 3243 writel(0, &sdr_scc_mgr->update); 3244 3245 /* 3246 * ArriaV has hard FIFOs that can only be initialized by incrementing 3247 * in sequencer. 3248 */ 3249 vfifo_offset = CALIB_VFIFO_OFFSET; 3250 for (j = 0; j < vfifo_offset; j++) { 3251 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); 3252 } 3253 writel(0, &phy_mgr_cmd->fifo_reset); 3254 3255 /* 3256 * For ACV with hard lfifo, we get the skip-cal setting from 3257 * generation-time constant. 
3258 */ 3259 gbl->curr_read_lat = CALIB_LFIFO_OFFSET; 3260 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3261 } 3262 3263 /* Memory calibration entry point */ 3264 static uint32_t mem_calibrate(void) 3265 { 3266 uint32_t i; 3267 uint32_t rank_bgn, sr; 3268 uint32_t write_group, write_test_bgn; 3269 uint32_t read_group, read_test_bgn; 3270 uint32_t run_groups, current_run; 3271 uint32_t failing_groups = 0; 3272 uint32_t group_failed = 0; 3273 uint32_t sr_failed = 0; 3274 3275 debug("%s:%d\n", __func__, __LINE__); 3276 /* Initialize the data settings */ 3277 3278 gbl->error_substage = CAL_SUBSTAGE_NIL; 3279 gbl->error_stage = CAL_STAGE_NIL; 3280 gbl->error_group = 0xff; 3281 gbl->fom_in = 0; 3282 gbl->fom_out = 0; 3283 3284 mem_config(); 3285 3286 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3287 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3288 SCC_MGR_GROUP_COUNTER_OFFSET); 3289 /* Only needed once to set all groups, pins, DQ, DQS, DM. */ 3290 if (i == 0) 3291 scc_mgr_set_hhp_extras(); 3292 3293 scc_set_bypass_mode(i); 3294 } 3295 3296 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { 3297 /* 3298 * Set VFIFO and LFIFO to instant-on settings in skip 3299 * calibration mode. 3300 */ 3301 mem_skip_calibrate(); 3302 } else { 3303 for (i = 0; i < NUM_CALIB_REPEAT; i++) { 3304 /* 3305 * Zero all delay chain/phase settings for all 3306 * groups and all shadow register sets. 3307 */ 3308 scc_mgr_zero_all(); 3309 3310 run_groups = ~param->skip_groups; 3311 3312 for (write_group = 0, write_test_bgn = 0; write_group 3313 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++, 3314 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) { 3315 /* Initialized the group failure */ 3316 group_failed = 0; 3317 3318 current_run = run_groups & ((1 << 3319 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); 3320 run_groups = run_groups >> 3321 RW_MGR_NUM_DQS_PER_WRITE_GROUP; 3322 3323 if (current_run == 0) 3324 continue; 3325 3326 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | 3327 SCC_MGR_GROUP_COUNTER_OFFSET); 3328 scc_mgr_zero_group(write_group, 0); 3329 3330 for (read_group = write_group * 3331 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3332 RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3333 read_test_bgn = 0; 3334 read_group < (write_group + 1) * 3335 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3336 RW_MGR_MEM_IF_WRITE_DQS_WIDTH && 3337 group_failed == 0; 3338 read_group++, read_test_bgn += 3339 RW_MGR_MEM_DQ_PER_READ_DQS) { 3340 /* Calibrate the VFIFO */ 3341 if (!((STATIC_CALIB_STEPS) & 3342 CALIB_SKIP_VFIFO)) { 3343 if (!rw_mgr_mem_calibrate_vfifo 3344 (read_group, 3345 read_test_bgn)) { 3346 group_failed = 1; 3347 3348 if (!(gbl-> 3349 phy_debug_mode_flags & 3350 PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3351 return 0; 3352 } 3353 } 3354 } 3355 } 3356 3357 /* Calibrate the output side */ 3358 if (group_failed == 0) { 3359 for (rank_bgn = 0, sr = 0; rank_bgn 3360 < RW_MGR_MEM_NUMBER_OF_RANKS; 3361 rank_bgn += 3362 NUM_RANKS_PER_SHADOW_REG, 3363 ++sr) { 3364 sr_failed = 0; 3365 if (!((STATIC_CALIB_STEPS) & 3366 CALIB_SKIP_WRITES)) { 3367 if ((STATIC_CALIB_STEPS) 3368 & CALIB_SKIP_DELAY_SWEEPS) { 3369 /* not needed in quick mode! */ 3370 } else { 3371 /* 3372 * Determine if this set of 3373 * ranks should be skipped 3374 * entirely. 
3375 */ 3376 if (!param->skip_shadow_regs[sr]) { 3377 if (!rw_mgr_mem_calibrate_writes 3378 (rank_bgn, write_group, 3379 write_test_bgn)) { 3380 sr_failed = 1; 3381 if (!(gbl-> 3382 phy_debug_mode_flags & 3383 PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3384 return 0; 3385 } 3386 } 3387 } 3388 } 3389 } 3390 if (sr_failed != 0) 3391 group_failed = 1; 3392 } 3393 } 3394 3395 if (group_failed == 0) { 3396 for (read_group = write_group * 3397 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3398 RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3399 read_test_bgn = 0; 3400 read_group < (write_group + 1) 3401 * RW_MGR_MEM_IF_READ_DQS_WIDTH 3402 / RW_MGR_MEM_IF_WRITE_DQS_WIDTH && 3403 group_failed == 0; 3404 read_group++, read_test_bgn += 3405 RW_MGR_MEM_DQ_PER_READ_DQS) { 3406 if (!((STATIC_CALIB_STEPS) & 3407 CALIB_SKIP_WRITES)) { 3408 if (!rw_mgr_mem_calibrate_vfifo_end 3409 (read_group, read_test_bgn)) { 3410 group_failed = 1; 3411 3412 if (!(gbl->phy_debug_mode_flags 3413 & PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3414 return 0; 3415 } 3416 } 3417 } 3418 } 3419 } 3420 3421 if (group_failed != 0) 3422 failing_groups++; 3423 } 3424 3425 /* 3426 * USER If there are any failing groups then report 3427 * the failure. 3428 */ 3429 if (failing_groups != 0) 3430 return 0; 3431 3432 /* Calibrate the LFIFO */ 3433 if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) { 3434 /* 3435 * If we're skipping groups as part of debug, 3436 * don't calibrate LFIFO. 3437 */ 3438 if (param->skip_groups == 0) { 3439 if (!rw_mgr_mem_calibrate_lfifo()) 3440 return 0; 3441 } 3442 } 3443 } 3444 } 3445 3446 /* 3447 * Do not remove this line as it makes sure all of our decisions 3448 * have been applied. 3449 */ 3450 writel(0, &sdr_scc_mgr->update); 3451 return 1; 3452 } 3453 3454 static uint32_t run_mem_calibrate(void) 3455 { 3456 uint32_t pass; 3457 uint32_t debug_info; 3458 3459 debug("%s:%d\n", __func__, __LINE__); 3460 3461 /* Reset pass/fail status shown on afi_cal_success/fail */ 3462 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status); 3463 3464 /* stop tracking manger */ 3465 uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg); 3466 3467 writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg); 3468 3469 initialize(); 3470 rw_mgr_mem_initialize(); 3471 3472 pass = mem_calibrate(); 3473 3474 mem_precharge_and_activate(); 3475 writel(0, &phy_mgr_cmd->fifo_reset); 3476 3477 /* 3478 * Handoff: 3479 * Don't return control of the PHY back to AFI when in debug mode. 
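 * Skipping the handoff leaves the sequencer, rather than the
 * controller/AFI, in control of the PHY, presumably so debug tooling can
 * keep driving the interface.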
3480 */ 3481 if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) { 3482 rw_mgr_mem_handoff(); 3483 /* 3484 * In Hard PHY this is a 2-bit control: 3485 * 0: AFI Mux Select 3486 * 1: DDIO Mux Select 3487 */ 3488 writel(0x2, &phy_mgr_cfg->mux_sel); 3489 } 3490 3491 writel(ctrlcfg, &sdr_ctrl->ctrl_cfg); 3492 3493 if (pass) { 3494 printf("%s: CALIBRATION PASSED\n", __FILE__); 3495 3496 gbl->fom_in /= 2; 3497 gbl->fom_out /= 2; 3498 3499 if (gbl->fom_in > 0xff) 3500 gbl->fom_in = 0xff; 3501 3502 if (gbl->fom_out > 0xff) 3503 gbl->fom_out = 0xff; 3504 3505 /* Update the FOM in the register file */ 3506 debug_info = gbl->fom_in; 3507 debug_info |= gbl->fom_out << 8; 3508 writel(debug_info, &sdr_reg_file->fom); 3509 3510 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3511 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status); 3512 } else { 3513 printf("%s: CALIBRATION FAILED\n", __FILE__); 3514 3515 debug_info = gbl->error_stage; 3516 debug_info |= gbl->error_substage << 8; 3517 debug_info |= gbl->error_group << 16; 3518 3519 writel(debug_info, &sdr_reg_file->failing_stage); 3520 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3521 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status); 3522 3523 /* Update the failing group/stage in the register file */ 3524 debug_info = gbl->error_stage; 3525 debug_info |= gbl->error_substage << 8; 3526 debug_info |= gbl->error_group << 16; 3527 writel(debug_info, &sdr_reg_file->failing_stage); 3528 } 3529 3530 return pass; 3531 } 3532 3533 /** 3534 * hc_initialize_rom_data() - Initialize ROM data 3535 * 3536 * Initialize ROM data. 3537 */ 3538 static void hc_initialize_rom_data(void) 3539 { 3540 u32 i, addr; 3541 3542 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET; 3543 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++) 3544 writel(inst_rom_init[i], addr + (i << 2)); 3545 3546 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET; 3547 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++) 3548 writel(ac_rom_init[i], addr + (i << 2)); 3549 } 3550 3551 /** 3552 * initialize_reg_file() - Initialize SDR register file 3553 * 3554 * Initialize SDR register file. 3555 */ 3556 static void initialize_reg_file(void) 3557 { 3558 /* Initialize the register file with the correct data */ 3559 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature); 3560 writel(0, &sdr_reg_file->debug_data_addr); 3561 writel(0, &sdr_reg_file->cur_stage); 3562 writel(0, &sdr_reg_file->fom); 3563 writel(0, &sdr_reg_file->failing_stage); 3564 writel(0, &sdr_reg_file->debug1); 3565 writel(0, &sdr_reg_file->debug2); 3566 } 3567 3568 /** 3569 * initialize_hps_phy() - Initialize HPS PHY 3570 * 3571 * Initialize HPS PHY. 3572 */ 3573 static void initialize_hps_phy(void) 3574 { 3575 uint32_t reg; 3576 /* 3577 * Tracking also gets configured here because it's in the 3578 * same register. 3579 */ 3580 uint32_t trk_sample_count = 7500; 3581 uint32_t trk_long_idle_sample_count = (10 << 16) | 100; 3582 /* 3583 * Format is number of outer loops in the 16 MSB, sample 3584 * count in 16 LSB. 3585 */ 3586 3587 reg = 0; 3588 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); 3589 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); 3590 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); 3591 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); 3592 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); 3593 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); 3594 /* 3595 * This field selects the intrinsic latency to RDATA_EN/FULL path. 
3596 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 3597 */ 3598 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); 3599 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET( 3600 trk_sample_count); 3601 writel(reg, &sdr_ctrl->phy_ctrl0); 3602 3603 reg = 0; 3604 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET( 3605 trk_sample_count >> 3606 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); 3607 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET( 3608 trk_long_idle_sample_count); 3609 writel(reg, &sdr_ctrl->phy_ctrl1); 3610 3611 reg = 0; 3612 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET( 3613 trk_long_idle_sample_count >> 3614 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); 3615 writel(reg, &sdr_ctrl->phy_ctrl2); 3616 } 3617 3618 static void initialize_tracking(void) 3619 { 3620 uint32_t concatenated_longidle = 0x0; 3621 uint32_t concatenated_delays = 0x0; 3622 uint32_t concatenated_rw_addr = 0x0; 3623 uint32_t concatenated_refresh = 0x0; 3624 uint32_t trk_sample_count = 7500; 3625 uint32_t dtaps_per_ptap; 3626 uint32_t tmp_delay; 3627 3628 /* 3629 * compute usable version of value in case we skip full 3630 * computation later 3631 */ 3632 dtaps_per_ptap = 0; 3633 tmp_delay = 0; 3634 while (tmp_delay < IO_DELAY_PER_OPA_TAP) { 3635 dtaps_per_ptap++; 3636 tmp_delay += IO_DELAY_PER_DCHAIN_TAP; 3637 } 3638 dtaps_per_ptap--; 3639 3640 concatenated_longidle = concatenated_longidle ^ 10; 3641 /*longidle outer loop */ 3642 concatenated_longidle = concatenated_longidle << 16; 3643 concatenated_longidle = concatenated_longidle ^ 100; 3644 /*longidle sample count */ 3645 concatenated_delays = concatenated_delays ^ 243; 3646 /* trfc, worst case of 933Mhz 4Gb */ 3647 concatenated_delays = concatenated_delays << 8; 3648 concatenated_delays = concatenated_delays ^ 14; 3649 /* trcd, worst case */ 3650 concatenated_delays = concatenated_delays << 8; 3651 concatenated_delays = concatenated_delays ^ 10; 3652 /* vfifo wait */ 3653 concatenated_delays = concatenated_delays << 8; 3654 concatenated_delays = concatenated_delays ^ 4; 3655 /* mux delay */ 3656 3657 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE; 3658 concatenated_rw_addr = concatenated_rw_addr << 8; 3659 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1; 3660 concatenated_rw_addr = concatenated_rw_addr << 8; 3661 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ; 3662 concatenated_rw_addr = concatenated_rw_addr << 8; 3663 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL; 3664 3665 concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL; 3666 concatenated_refresh = concatenated_refresh << 24; 3667 concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */ 3668 3669 /* Initialize the register file with the correct data */ 3670 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap); 3671 writel(trk_sample_count, &sdr_reg_file->trk_sample_count); 3672 writel(concatenated_longidle, &sdr_reg_file->trk_longidle); 3673 writel(concatenated_delays, &sdr_reg_file->delays); 3674 writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr); 3675 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width); 3676 writel(concatenated_refresh, &sdr_reg_file->trk_rfsh); 3677 } 3678 3679 int sdram_calibration_full(void) 3680 { 3681 struct param_type my_param; 3682 struct gbl_type my_gbl; 3683 uint32_t pass; 3684 uint32_t i; 3685 3686 param = &my_param; 3687 gbl = &my_gbl; 3688 3689 /* 
Initialize the debug mode flags */ 3690 gbl->phy_debug_mode_flags = 0; 3691 /* Set the calibration enabled by default */ 3692 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; 3693 /* 3694 * Only sweep all groups (regardless of fail state) by default 3695 * Set enabled read test by default. 3696 */ 3697 #if DISABLE_GUARANTEED_READ 3698 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; 3699 #endif 3700 /* Initialize the register file */ 3701 initialize_reg_file(); 3702 3703 /* Initialize any PHY CSR */ 3704 initialize_hps_phy(); 3705 3706 scc_mgr_initialize(); 3707 3708 initialize_tracking(); 3709 3710 /* USER Enable all ranks, groups */ 3711 for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++) 3712 param->skip_ranks[i] = 0; 3713 for (i = 0; i < NUM_SHADOW_REGS; ++i) 3714 param->skip_shadow_regs[i] = 0; 3715 param->skip_groups = 0; 3716 3717 printf("%s: Preparing to start memory calibration\n", __FILE__); 3718 3719 debug("%s:%d\n", __func__, __LINE__); 3720 debug_cond(DLEVEL == 1, 3721 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", 3722 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM, 3723 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS, 3724 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS, 3725 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 3726 debug_cond(DLEVEL == 1, 3727 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", 3728 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3729 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH, 3730 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP); 3731 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u", 3732 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH); 3733 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", 3734 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX, 3735 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX); 3736 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", 3737 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX, 3738 IO_IO_OUT2_DELAY_MAX); 3739 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", 3740 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE); 3741 3742 hc_initialize_rom_data(); 3743 3744 /* update info for sims */ 3745 reg_file_set_stage(CAL_STAGE_NIL); 3746 reg_file_set_group(0); 3747 3748 /* 3749 * Load global needed for those actions that require 3750 * some dynamic calibration support. 3751 */ 3752 dyn_calib_steps = STATIC_CALIB_STEPS; 3753 /* 3754 * Load global to allow dynamic selection of delay loop settings 3755 * based on calibration mode. 3756 */ 3757 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) 3758 skip_delay_mask = 0xff; 3759 else 3760 skip_delay_mask = 0x0; 3761 3762 pass = run_mem_calibrate(); 3763 3764 printf("%s: Calibration complete\n", __FILE__); 3765 return pass; 3766 } 3767
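/*
 * Usage sketch (illustrative only; the calling code is an assumption, not
 * part of this file): board/SPL code is expected to program the SDRAM
 * controller registers and then invoke the entry point above, e.g.:
 *
 *	if (!sdram_calibration_full())
 *		hang();
 *
 * A non-zero return means calibration passed and, unless debug mode is
 * enabled, the PHY has already been handed back to the controller/AFI.
 */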