1 /* 2 * Copyright Altera Corporation (C) 2012-2015 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <common.h> 8 #include <asm/io.h> 9 #include <asm/arch/sdram.h> 10 #include "sequencer.h" 11 #include "sequencer_auto.h" 12 #include "sequencer_auto_ac_init.h" 13 #include "sequencer_auto_inst_init.h" 14 #include "sequencer_defines.h" 15 16 static void scc_mgr_load_dqs_for_write_group(uint32_t write_group); 17 18 static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs = 19 (struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800); 20 21 static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs = 22 (struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00); 23 24 static struct socfpga_sdr_reg_file *sdr_reg_file = 25 (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS; 26 27 static struct socfpga_sdr_scc_mgr *sdr_scc_mgr = 28 (struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00); 29 30 static struct socfpga_phy_mgr_cmd *phy_mgr_cmd = 31 (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS; 32 33 static struct socfpga_phy_mgr_cfg *phy_mgr_cfg = 34 (struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40); 35 36 static struct socfpga_data_mgr *data_mgr = 37 (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS; 38 39 static struct socfpga_sdr_ctrl *sdr_ctrl = 40 (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS; 41 42 #define DELTA_D 1 43 44 /* 45 * In order to reduce ROM size, most of the selectable calibration steps are 46 * decided at compile time based on the user's calibration mode selection, 47 * as captured by the STATIC_CALIB_STEPS selection below. 48 * 49 * However, to support simulation-time selection of fast simulation mode, where 50 * we skip everything except the bare minimum, we need a few of the steps to 51 * be dynamic. 
In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not skipping, but is
 * zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(uint32_t set_group)
{
	/* Read the current group and stage */
	uint32_t cur_stage_group = readl(&sdr_reg_file->cur_stage);

	/* Clear the group */
	cur_stage_group &= 0x0000FFFF;

	/* Set the group */
	cur_stage_group |= (set_group << 16);

	/* Write the data back */
	writel(cur_stage_group, &sdr_reg_file->cur_stage);
}

static void reg_file_set_stage(uint32_t set_stage)
{
	/* Read the current group and stage */
	uint32_t cur_stage_group = readl(&sdr_reg_file->cur_stage);

	/* Clear the stage and substage */
	cur_stage_group &= 0xFFFF0000;

	/* Set the stage */
	cur_stage_group |= (set_stage & 0x000000FF);

	/* Write the data back */
	writel(cur_stage_group, &sdr_reg_file->cur_stage);
}

static void reg_file_set_sub_stage(uint32_t set_sub_stage)
{
	/* Read the current group and stage */
	uint32_t cur_stage_group = readl(&sdr_reg_file->cur_stage);

	/* Clear the substage */
	cur_stage_group &= 0xFFFF00FF;

	/* Set the sub stage */
	cur_stage_group |= ((set_sub_stage << 8) & 0x0000FF00);

	/* Write the data back */
	writel(cur_stage_group, &sdr_reg_file->cur_stage);
}

static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);
	/* USER calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable; we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}

static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/* - Dual-Slot, Single-Rank
				 *   (1 chip-select per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM),
				 *   which means 2 DIMMs
				 * Since MEM_NUMBER_OF_RANKS is 2, they are
				 * both single rank with 2 CS each
				 * (special for RDIMM).
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * USER - Single-Slot, Dual-Rank DIMMs
				 *   (2 chip-selects per DIMM)
				 * USER Read: Turn off ODT on all ranks
				 * USER Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/* 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |                       |
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |                       |
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

static void scc_mgr_initialize(void)
{
	u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_RFILE_OFFSET;

	/*
	 * Clear register file for HPS
	 * 16 (2^4) is the size of the full register file in the scc mgr:
	 *	RFILE_DEPTH = log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			   MEM_IF_READ_DQS_WIDTH - 1) + 1;
	 */
	uint32_t i;
	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		writel(0, addr + (i << 2));
	}
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group,
					 uint32_t delay)
{
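	/*
	 * Each read group has its own delay register, laid out 4 bytes
	 * apart, so the store below offsets the base address by
	 * read_group * 4 (the "<< 2").
	 */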
u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET; 306 307 /* Load the setting in the SCC manager */ 308 writel(delay, addr + (read_group << 2)); 309 } 310 311 static void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, 312 uint32_t delay) 313 { 314 u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET; 315 316 writel(delay, addr + (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2)); 317 } 318 319 static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase) 320 { 321 u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_PHASE_OFFSET; 322 323 /* Load the setting in the SCC manager */ 324 writel(phase, addr + (read_group << 2)); 325 } 326 327 static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group, 328 uint32_t phase) 329 { 330 uint32_t r; 331 uint32_t update_scan_chains; 332 333 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 334 r += NUM_RANKS_PER_SHADOW_REG) { 335 /* 336 * USER although the h/w doesn't support different phases per 337 * shadow register, for simplicity our scc manager modeling 338 * keeps different phase settings per shadow reg, and it's 339 * important for us to keep them in sync to match h/w. 340 * for efficiency, the scan chain update should occur only 341 * once to sr0. 342 */ 343 update_scan_chains = (r == 0) ? 1 : 0; 344 345 scc_mgr_set_dqs_en_phase(read_group, phase); 346 347 if (update_scan_chains) { 348 writel(read_group, &sdr_scc_mgr->dqs_ena); 349 writel(0, &sdr_scc_mgr->update); 350 } 351 } 352 } 353 354 static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, 355 uint32_t phase) 356 { 357 u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQDQS_OUT_PHASE_OFFSET; 358 359 /* Load the setting in the SCC manager */ 360 writel(phase, addr + (write_group << 2)); 361 } 362 363 static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group, 364 uint32_t phase) 365 { 366 uint32_t r; 367 uint32_t update_scan_chains; 368 369 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 370 r += NUM_RANKS_PER_SHADOW_REG) { 371 /* 372 * USER although the h/w doesn't support different phases per 373 * shadow register, for simplicity our scc manager modeling 374 * keeps different phase settings per shadow reg, and it's 375 * important for us to keep them in sync to match h/w. 376 * for efficiency, the scan chain update should occur only 377 * once to sr0. 378 */ 379 update_scan_chains = (r == 0) ? 1 : 0; 380 381 scc_mgr_set_dqdqs_output_phase(write_group, phase); 382 383 if (update_scan_chains) { 384 writel(write_group, &sdr_scc_mgr->dqs_ena); 385 writel(0, &sdr_scc_mgr->update); 386 } 387 } 388 } 389 390 static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay) 391 { 392 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_DELAY_OFFSET; 393 394 /* Load the setting in the SCC manager */ 395 writel(delay + IO_DQS_EN_DELAY_OFFSET, addr + 396 (read_group << 2)); 397 } 398 399 static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group, 400 uint32_t delay) 401 { 402 uint32_t r; 403 404 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 405 r += NUM_RANKS_PER_SHADOW_REG) { 406 scc_mgr_set_dqs_en_delay(read_group, delay); 407 408 writel(read_group, &sdr_scc_mgr->dqs_ena); 409 /* 410 * In shadow register mode, the T11 settings are stored in 411 * registers in the core, which are updated by the DQS_ENA 412 * signals. Not issuing the SCC_MGR_UPD command allows us to 413 * save lots of rank switching overhead, by calling 414 * select_shadow_regs_for_update with update_scan_chains 415 * set to 0. 
416 */ 417 writel(0, &sdr_scc_mgr->update); 418 } 419 /* 420 * In shadow register mode, the T11 settings are stored in 421 * registers in the core, which are updated by the DQS_ENA 422 * signals. Not issuing the SCC_MGR_UPD command allows us to 423 * save lots of rank switching overhead, by calling 424 * select_shadow_regs_for_update with update_scan_chains 425 * set to 0. 426 */ 427 writel(0, &sdr_scc_mgr->update); 428 } 429 430 static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay) 431 { 432 uint32_t read_group; 433 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_OCT_OUT1_DELAY_OFFSET; 434 435 /* 436 * Load the setting in the SCC manager 437 * Although OCT affects only write data, the OCT delay is controlled 438 * by the DQS logic block which is instantiated once per read group. 439 * For protocols where a write group consists of multiple read groups, 440 * the setting must be set multiple times. 441 */ 442 for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / 443 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; 444 read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / 445 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group) 446 writel(delay, addr + (read_group << 2)); 447 } 448 449 static void scc_mgr_set_dq_out1_delay(uint32_t write_group, 450 uint32_t dq_in_group, uint32_t delay) 451 { 452 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 453 454 /* Load the setting in the SCC manager */ 455 writel(delay, addr + (dq_in_group << 2)); 456 } 457 458 static void scc_mgr_set_dq_in_delay(uint32_t write_group, 459 uint32_t dq_in_group, uint32_t delay) 460 { 461 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET; 462 463 /* Load the setting in the SCC manager */ 464 writel(delay, addr + (dq_in_group << 2)); 465 } 466 467 static void scc_mgr_set_hhp_extras(void) 468 { 469 /* 470 * Load the fixed setting in the SCC manager 471 * bits: 0:0 = 1'b1 - dqs bypass 472 * bits: 1:1 = 1'b1 - dq bypass 473 * bits: 4:2 = 3'b001 - rfifo_mode 474 * bits: 6:5 = 2'b01 - rfifo clock_select 475 * bits: 7:7 = 1'b0 - separate gating from ungating setting 476 * bits: 8:8 = 1'b0 - separate OE from Output delay setting 477 */ 478 uint32_t value = (0<<8) | (0<<7) | (1<<5) | (1<<2) | (1<<1) | (1<<0); 479 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_GLOBALS_OFFSET; 480 481 writel(value, addr + SCC_MGR_HHP_EXTRAS_OFFSET); 482 } 483 484 static void scc_mgr_set_dqs_out1_delay(uint32_t write_group, 485 uint32_t delay) 486 { 487 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 488 489 /* Load the setting in the SCC manager */ 490 writel(delay, addr + (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2)); 491 } 492 493 static void scc_mgr_set_dm_out1_delay(uint32_t write_group, 494 uint32_t dm, uint32_t delay) 495 { 496 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 497 498 /* Load the setting in the SCC manager */ 499 writel(delay, addr + 500 ((RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm) << 2)); 501 } 502 503 /* 504 * USER Zero all DQS config 505 * TODO: maybe rename to scc_mgr_zero_dqs_config (or something) 506 */ 507 static void scc_mgr_zero_all(void) 508 { 509 uint32_t i, r; 510 511 /* 512 * USER Zero all DQS config settings, across all groups and all 513 * shadow registers 514 */ 515 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += 516 NUM_RANKS_PER_SHADOW_REG) { 517 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 518 /* 519 * The phases actually don't exist on a per-rank basis, 520 * but there's no harm updating 
them several times, so 521 * let's keep the code simple. 522 */ 523 scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE); 524 scc_mgr_set_dqs_en_phase(i, 0); 525 scc_mgr_set_dqs_en_delay(i, 0); 526 } 527 528 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { 529 scc_mgr_set_dqdqs_output_phase(i, 0); 530 /* av/cv don't have out2 */ 531 scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE); 532 } 533 } 534 535 /* multicast to all DQS group enables */ 536 writel(0xff, &sdr_scc_mgr->dqs_ena); 537 writel(0, &sdr_scc_mgr->update); 538 } 539 540 static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode) 541 { 542 /* mode = 0 : Do NOT bypass - Half Rate Mode */ 543 /* mode = 1 : Bypass - Full Rate Mode */ 544 545 /* only need to set once for all groups, pins, dq, dqs, dm */ 546 if (write_group == 0) { 547 debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__, 548 __LINE__); 549 scc_mgr_set_hhp_extras(); 550 debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n", 551 __func__, __LINE__); 552 } 553 /* multicast to all DQ enables */ 554 writel(0xff, &sdr_scc_mgr->dq_ena); 555 writel(0xff, &sdr_scc_mgr->dm_ena); 556 557 /* update current DQS IO enable */ 558 writel(0, &sdr_scc_mgr->dqs_io_ena); 559 560 /* update the DQS logic */ 561 writel(write_group, &sdr_scc_mgr->dqs_ena); 562 563 /* hit update */ 564 writel(0, &sdr_scc_mgr->update); 565 } 566 567 static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin, 568 int32_t out_only) 569 { 570 uint32_t i, r; 571 572 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += 573 NUM_RANKS_PER_SHADOW_REG) { 574 /* Zero all DQ config settings */ 575 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 576 scc_mgr_set_dq_out1_delay(write_group, i, 0); 577 if (!out_only) 578 scc_mgr_set_dq_in_delay(write_group, i, 0); 579 } 580 581 /* multicast to all DQ enables */ 582 writel(0xff, &sdr_scc_mgr->dq_ena); 583 584 /* Zero all DM config settings */ 585 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 586 scc_mgr_set_dm_out1_delay(write_group, i, 0); 587 } 588 589 /* multicast to all DM enables */ 590 writel(0xff, &sdr_scc_mgr->dm_ena); 591 592 /* zero all DQS io settings */ 593 if (!out_only) 594 scc_mgr_set_dqs_io_in_delay(write_group, 0); 595 /* av/cv don't have out2 */ 596 scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE); 597 scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE); 598 scc_mgr_load_dqs_for_write_group(write_group); 599 600 /* multicast to all DQS IO enables (only 1) */ 601 writel(0, &sdr_scc_mgr->dqs_io_ena); 602 603 /* hit update to zero everything */ 604 writel(0, &sdr_scc_mgr->update); 605 } 606 } 607 608 /* load up dqs config settings */ 609 static void scc_mgr_load_dqs(uint32_t dqs) 610 { 611 writel(dqs, &sdr_scc_mgr->dqs_ena); 612 } 613 614 static void scc_mgr_load_dqs_for_write_group(uint32_t write_group) 615 { 616 uint32_t read_group; 617 uint32_t addr = (u32)&sdr_scc_mgr->dqs_ena; 618 /* 619 * Although OCT affects only write data, the OCT delay is controlled 620 * by the DQS logic block which is instantiated once per read group. 621 * For protocols where a write group consists of multiple read groups, 622 * the setting must be scanned multiple times. 
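	 *
	 * For example, with hypothetical interface widths of 8 read groups
	 * and 4 write groups (a 2:1 ratio), write_group 1 maps to
	 * read_groups 2 and 3, so the scan below is requested twice.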
623 */ 624 for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / 625 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; 626 read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / 627 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group) 628 writel(read_group, addr); 629 } 630 631 /* load up dqs io config settings */ 632 static void scc_mgr_load_dqs_io(void) 633 { 634 writel(0, &sdr_scc_mgr->dqs_io_ena); 635 } 636 637 /* load up dq config settings */ 638 static void scc_mgr_load_dq(uint32_t dq_in_group) 639 { 640 writel(dq_in_group, &sdr_scc_mgr->dq_ena); 641 } 642 643 /* load up dm config settings */ 644 static void scc_mgr_load_dm(uint32_t dm) 645 { 646 writel(dm, &sdr_scc_mgr->dm_ena); 647 } 648 649 /* 650 * apply and load a particular input delay for the DQ pins in a group 651 * group_bgn is the index of the first dq pin (in the write group) 652 */ 653 static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group, 654 uint32_t group_bgn, uint32_t delay) 655 { 656 uint32_t i, p; 657 658 for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { 659 scc_mgr_set_dq_in_delay(write_group, p, delay); 660 scc_mgr_load_dq(p); 661 } 662 } 663 664 /* apply and load a particular output delay for the DQ pins in a group */ 665 static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group, 666 uint32_t group_bgn, 667 uint32_t delay1) 668 { 669 uint32_t i, p; 670 671 for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { 672 scc_mgr_set_dq_out1_delay(write_group, i, delay1); 673 scc_mgr_load_dq(i); 674 } 675 } 676 677 /* apply and load a particular output delay for the DM pins in a group */ 678 static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group, 679 uint32_t delay1) 680 { 681 uint32_t i; 682 683 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 684 scc_mgr_set_dm_out1_delay(write_group, i, delay1); 685 scc_mgr_load_dm(i); 686 } 687 } 688 689 690 /* apply and load delay on both DQS and OCT out1 */ 691 static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group, 692 uint32_t delay) 693 { 694 scc_mgr_set_dqs_out1_delay(write_group, delay); 695 scc_mgr_load_dqs_io(); 696 697 scc_mgr_set_oct_out1_delay(write_group, delay); 698 scc_mgr_load_dqs_for_write_group(write_group); 699 } 700 701 /* apply a delay to the entire output side: DQ, DM, DQS, OCT */ 702 static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group, 703 uint32_t group_bgn, 704 uint32_t delay) 705 { 706 uint32_t i, p, new_delay; 707 708 /* dq shift */ 709 for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { 710 new_delay = READ_SCC_DQ_OUT2_DELAY; 711 new_delay += delay; 712 713 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 714 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQ[%u,%u]:\ 715 %u > %lu => %lu", __func__, __LINE__, 716 write_group, group_bgn, delay, i, p, new_delay, 717 (long unsigned int)IO_IO_OUT2_DELAY_MAX, 718 (long unsigned int)IO_IO_OUT2_DELAY_MAX); 719 new_delay = IO_IO_OUT2_DELAY_MAX; 720 } 721 722 scc_mgr_load_dq(i); 723 } 724 725 /* dm shift */ 726 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 727 new_delay = READ_SCC_DM_IO_OUT2_DELAY; 728 new_delay += delay; 729 730 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 731 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DM[%u]:\ 732 %u > %lu => %lu\n", __func__, __LINE__, 733 write_group, group_bgn, delay, i, new_delay, 734 (long unsigned int)IO_IO_OUT2_DELAY_MAX, 735 (long unsigned int)IO_IO_OUT2_DELAY_MAX); 736 new_delay = IO_IO_OUT2_DELAY_MAX; 737 } 738 739 scc_mgr_load_dm(i); 740 } 741 742 /* dqs 
shift */ 743 new_delay = READ_SCC_DQS_IO_OUT2_DELAY; 744 new_delay += delay; 745 746 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 747 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;" 748 " adding %u to OUT1\n", __func__, __LINE__, 749 write_group, group_bgn, delay, new_delay, 750 IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, 751 new_delay - IO_IO_OUT2_DELAY_MAX); 752 scc_mgr_set_dqs_out1_delay(write_group, new_delay - 753 IO_IO_OUT2_DELAY_MAX); 754 new_delay = IO_IO_OUT2_DELAY_MAX; 755 } 756 757 scc_mgr_load_dqs_io(); 758 759 /* oct shift */ 760 new_delay = READ_SCC_OCT_OUT2_DELAY; 761 new_delay += delay; 762 763 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 764 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;" 765 " adding %u to OUT1\n", __func__, __LINE__, 766 write_group, group_bgn, delay, new_delay, 767 IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, 768 new_delay - IO_IO_OUT2_DELAY_MAX); 769 scc_mgr_set_oct_out1_delay(write_group, new_delay - 770 IO_IO_OUT2_DELAY_MAX); 771 new_delay = IO_IO_OUT2_DELAY_MAX; 772 } 773 774 scc_mgr_load_dqs_for_write_group(write_group); 775 } 776 777 /* 778 * USER apply a delay to the entire output side (DQ, DM, DQS, OCT) 779 * and to all ranks 780 */ 781 static void scc_mgr_apply_group_all_out_delay_add_all_ranks( 782 uint32_t write_group, uint32_t group_bgn, uint32_t delay) 783 { 784 uint32_t r; 785 786 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 787 r += NUM_RANKS_PER_SHADOW_REG) { 788 scc_mgr_apply_group_all_out_delay_add(write_group, 789 group_bgn, delay); 790 writel(0, &sdr_scc_mgr->update); 791 } 792 } 793 794 /* optimization used to recover some slots in ddr3 inst_rom */ 795 /* could be applied to other protocols if we wanted to */ 796 static void set_jump_as_return(void) 797 { 798 /* 799 * to save space, we replace return with jump to special shared 800 * RETURN instruction so we set the counter to large value so that 801 * we always jump 802 */ 803 writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0); 804 writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 805 } 806 807 /* 808 * should always use constants as argument to ensure all computations are 809 * performed at compile time 810 */ 811 static void delay_for_n_mem_clocks(const uint32_t clocks) 812 { 813 uint32_t afi_clocks; 814 uint8_t inner = 0; 815 uint8_t outer = 0; 816 uint16_t c_loop = 0; 817 818 debug("%s:%d: clocks=%u ... 
start\n", __func__, __LINE__, clocks); 819 820 821 afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO; 822 /* scale (rounding up) to get afi clocks */ 823 824 /* 825 * Note, we don't bother accounting for being off a little bit 826 * because of a few extra instructions in outer loops 827 * Note, the loops have a test at the end, and do the test before 828 * the decrement, and so always perform the loop 829 * 1 time more than the counter value 830 */ 831 if (afi_clocks == 0) { 832 ; 833 } else if (afi_clocks <= 0x100) { 834 inner = afi_clocks-1; 835 outer = 0; 836 c_loop = 0; 837 } else if (afi_clocks <= 0x10000) { 838 inner = 0xff; 839 outer = (afi_clocks-1) >> 8; 840 c_loop = 0; 841 } else { 842 inner = 0xff; 843 outer = 0xff; 844 c_loop = (afi_clocks-1) >> 16; 845 } 846 847 /* 848 * rom instructions are structured as follows: 849 * 850 * IDLE_LOOP2: jnz cntr0, TARGET_A 851 * IDLE_LOOP1: jnz cntr1, TARGET_B 852 * return 853 * 854 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and 855 * TARGET_B is set to IDLE_LOOP2 as well 856 * 857 * if we have no outer loop, though, then we can use IDLE_LOOP1 only, 858 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely 859 * 860 * a little confusing, but it helps save precious space in the inst_rom 861 * and sequencer rom and keeps the delays more accurate and reduces 862 * overhead 863 */ 864 if (afi_clocks <= 0x100) { 865 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), 866 &sdr_rw_load_mgr_regs->load_cntr1); 867 868 writel(RW_MGR_IDLE_LOOP1, 869 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 870 871 writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 872 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 873 } else { 874 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), 875 &sdr_rw_load_mgr_regs->load_cntr0); 876 877 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), 878 &sdr_rw_load_mgr_regs->load_cntr1); 879 880 writel(RW_MGR_IDLE_LOOP2, 881 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 882 883 writel(RW_MGR_IDLE_LOOP2, 884 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 885 886 /* hack to get around compiler not being smart enough */ 887 if (afi_clocks <= 0x10000) { 888 /* only need to run once */ 889 writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS | 890 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 891 } else { 892 do { 893 writel(RW_MGR_IDLE_LOOP2, 894 SDR_PHYGRP_RWMGRGRP_ADDRESS | 895 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 896 } while (c_loop-- != 0); 897 } 898 } 899 debug("%s:%d clocks=%u ... 
end\n", __func__, __LINE__, clocks); 900 } 901 902 static void rw_mgr_mem_initialize(void) 903 { 904 uint32_t r; 905 uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | 906 RW_MGR_RUN_SINGLE_GROUP_OFFSET; 907 908 debug("%s:%d\n", __func__, __LINE__); 909 910 /* The reset / cke part of initialization is broadcasted to all ranks */ 911 writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS | 912 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET); 913 914 /* 915 * Here's how you load register for a loop 916 * Counters are located @ 0x800 917 * Jump address are located @ 0xC00 918 * For both, registers 0 to 3 are selected using bits 3 and 2, like 919 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C 920 * I know this ain't pretty, but Avalon bus throws away the 2 least 921 * significant bits 922 */ 923 924 /* start with memory RESET activated */ 925 926 /* tINIT = 200us */ 927 928 /* 929 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles 930 * If a and b are the number of iteration in 2 nested loops 931 * it takes the following number of cycles to complete the operation: 932 * number_of_cycles = ((2 + n) * a + 2) * b 933 * where n is the number of instruction in the inner loop 934 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF, 935 * b = 6A 936 */ 937 938 /* Load counters */ 939 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL), 940 &sdr_rw_load_mgr_regs->load_cntr0); 941 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL), 942 &sdr_rw_load_mgr_regs->load_cntr1); 943 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL), 944 &sdr_rw_load_mgr_regs->load_cntr2); 945 946 /* Load jump address */ 947 writel(RW_MGR_INIT_RESET_0_CKE_0, 948 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 949 writel(RW_MGR_INIT_RESET_0_CKE_0, 950 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 951 writel(RW_MGR_INIT_RESET_0_CKE_0, 952 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 953 954 /* Execute count instruction */ 955 writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr); 956 957 /* indicate that memory is stable */ 958 writel(1, &phy_mgr_cfg->reset_mem_stbl); 959 960 /* 961 * transition the RESET to high 962 * Wait for 500us 963 */ 964 965 /* 966 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles 967 * If a and b are the number of iteration in 2 nested loops 968 * it takes the following number of cycles to complete the operation 969 * number_of_cycles = ((2 + n) * a + 2) * b 970 * where n is the number of instruction in the inner loop 971 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83, 972 * b = FF 973 */ 974 975 /* Load counters */ 976 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL), 977 &sdr_rw_load_mgr_regs->load_cntr0); 978 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL), 979 &sdr_rw_load_mgr_regs->load_cntr1); 980 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL), 981 &sdr_rw_load_mgr_regs->load_cntr2); 982 983 /* Load jump address */ 984 writel(RW_MGR_INIT_RESET_1_CKE_0, 985 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 986 writel(RW_MGR_INIT_RESET_1_CKE_0, 987 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 988 writel(RW_MGR_INIT_RESET_1_CKE_0, 989 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 990 991 writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr); 992 993 /* bring up clock enable */ 994 995 /* tXRP < 250 ck cycles */ 996 delay_for_n_mem_clocks(250); 997 998 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { 999 if (param->skip_ranks[r]) { 1000 /* request to skip the rank */ 1001 continue; 1002 } 1003 1004 /* set rank */ 1005 set_rank_and_odt_mask(r, 
RW_MGR_ODT_MODE_OFF);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
		}
		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/*
 * At the end of calibration we have to program the user settings in, and
 * USER hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;
		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/* load up MR settings specified by user */

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER, grpaddr);
		}
		/*
		 * USER need to wait tMOD (12CK or 15ns) time before issuing
		 * other commands, but we will have plenty of NIOS cycles
		 * before actual handoff, so it's okay.
		 */
	}
}

/*
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ?
RW_MGR_MEM_NUMBER_OF_RANKS : 1115 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1116 uint32_t addr; 1117 uint32_t base_rw_mgr; 1118 1119 *bit_chk = param->read_correct_mask; 1120 correct_mask_vg = param->read_correct_mask_vg; 1121 1122 for (r = rank_bgn; r < rank_end; r++) { 1123 if (param->skip_ranks[r]) 1124 /* request to skip the rank */ 1125 continue; 1126 1127 /* set rank */ 1128 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1129 1130 /* Load up a constant bursts of read commands */ 1131 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); 1132 writel(RW_MGR_GUARANTEED_READ, 1133 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1134 1135 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); 1136 writel(RW_MGR_GUARANTEED_READ_CONT, 1137 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1138 1139 tmp_bit_chk = 0; 1140 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) { 1141 /* reset the fifos to get pointers to known state */ 1142 1143 writel(0, &phy_mgr_cmd->fifo_reset); 1144 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1145 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1146 1147 tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS 1148 / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); 1149 1150 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1151 writel(RW_MGR_GUARANTEED_READ, addr + 1152 ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + 1153 vg) << 2)); 1154 1155 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1156 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr)); 1157 1158 if (vg == 0) 1159 break; 1160 } 1161 *bit_chk &= tmp_bit_chk; 1162 } 1163 1164 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1165 writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2)); 1166 1167 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1168 debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\ 1169 %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask, 1170 (long unsigned int)(*bit_chk == param->read_correct_mask)); 1171 return *bit_chk == param->read_correct_mask; 1172 } 1173 1174 static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks 1175 (uint32_t group, uint32_t num_tries, uint32_t *bit_chk) 1176 { 1177 return rw_mgr_mem_calibrate_read_test_patterns(0, group, 1178 num_tries, bit_chk, 1); 1179 } 1180 1181 /* load up the patterns we are going to use during a read test */ 1182 static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn, 1183 uint32_t all_ranks) 1184 { 1185 uint32_t r; 1186 uint32_t rank_end = all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : 1187 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1188 1189 debug("%s:%d\n", __func__, __LINE__); 1190 for (r = rank_bgn; r < rank_end; r++) { 1191 if (param->skip_ranks[r]) 1192 /* request to skip the rank */ 1193 continue; 1194 1195 /* set rank */ 1196 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1197 1198 /* Load up a constant bursts */ 1199 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); 1200 1201 writel(RW_MGR_GUARANTEED_WRITE_WAIT0, 1202 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1203 1204 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); 1205 1206 writel(RW_MGR_GUARANTEED_WRITE_WAIT1, 1207 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1208 1209 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2); 1210 1211 writel(RW_MGR_GUARANTEED_WRITE_WAIT2, 1212 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1213 1214 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3); 1215 1216 writel(RW_MGR_GUARANTEED_WRITE_WAIT3, 1217 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1218 1219 writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1220 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 1221 } 1222 1223 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1224 } 1225 1226 /* 1227 * try a read and see if it returns correct data back. has dummy reads 1228 * inserted into the mix used to align dqs enable. has more thorough checks 1229 * than the regular read test. 1230 */ 1231 static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group, 1232 uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk, 1233 uint32_t all_groups, uint32_t all_ranks) 1234 { 1235 uint32_t r, vg; 1236 uint32_t correct_mask_vg; 1237 uint32_t tmp_bit_chk; 1238 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : 1239 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1240 uint32_t addr; 1241 uint32_t base_rw_mgr; 1242 1243 *bit_chk = param->read_correct_mask; 1244 correct_mask_vg = param->read_correct_mask_vg; 1245 1246 uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) & 1247 CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION); 1248 1249 for (r = rank_bgn; r < rank_end; r++) { 1250 if (param->skip_ranks[r]) 1251 /* request to skip the rank */ 1252 continue; 1253 1254 /* set rank */ 1255 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1256 1257 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1); 1258 1259 writel(RW_MGR_READ_B2B_WAIT1, 1260 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1261 1262 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2); 1263 writel(RW_MGR_READ_B2B_WAIT2, 1264 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1265 1266 if (quick_read_mode) 1267 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0); 1268 /* need at least two (1+1) reads to capture failures */ 1269 else if (all_groups) 1270 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0); 1271 else 1272 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0); 1273 1274 writel(RW_MGR_READ_B2B, 1275 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1276 if (all_groups) 1277 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH * 1278 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1, 1279 &sdr_rw_load_mgr_regs->load_cntr3); 1280 else 1281 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3); 1282 1283 writel(RW_MGR_READ_B2B, 1284 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1285 1286 tmp_bit_chk = 0; 1287 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) { 1288 /* reset the fifos to get pointers to known state */ 1289 writel(0, &phy_mgr_cmd->fifo_reset); 1290 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1291 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1292 1293 tmp_bit_chk = 
tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS 1294 / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); 1295 1296 if (all_groups) 1297 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_ALL_GROUPS_OFFSET; 1298 else 1299 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1300 1301 writel(RW_MGR_READ_B2B, addr + 1302 ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + 1303 vg) << 2)); 1304 1305 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1306 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr)); 1307 1308 if (vg == 0) 1309 break; 1310 } 1311 *bit_chk &= tmp_bit_chk; 1312 } 1313 1314 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1315 writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2)); 1316 1317 if (all_correct) { 1318 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1319 debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\ 1320 (%u == %u) => %lu", __func__, __LINE__, group, 1321 all_groups, *bit_chk, param->read_correct_mask, 1322 (long unsigned int)(*bit_chk == 1323 param->read_correct_mask)); 1324 return *bit_chk == param->read_correct_mask; 1325 } else { 1326 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1327 debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\ 1328 (%u != %lu) => %lu\n", __func__, __LINE__, 1329 group, all_groups, *bit_chk, (long unsigned int)0, 1330 (long unsigned int)(*bit_chk != 0x00)); 1331 return *bit_chk != 0x00; 1332 } 1333 } 1334 1335 static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group, 1336 uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk, 1337 uint32_t all_groups) 1338 { 1339 return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct, 1340 bit_chk, all_groups, 1); 1341 } 1342 1343 static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v) 1344 { 1345 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy); 1346 (*v)++; 1347 } 1348 1349 static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v) 1350 { 1351 uint32_t i; 1352 1353 for (i = 0; i < VFIFO_SIZE-1; i++) 1354 rw_mgr_incr_vfifo(grp, v); 1355 } 1356 1357 static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk) 1358 { 1359 uint32_t v; 1360 uint32_t fail_cnt = 0; 1361 uint32_t test_status; 1362 1363 for (v = 0; v < VFIFO_SIZE; ) { 1364 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n", 1365 __func__, __LINE__, v); 1366 test_status = rw_mgr_mem_calibrate_read_test_all_ranks 1367 (grp, 1, PASS_ONE_BIT, bit_chk, 0); 1368 if (!test_status) { 1369 fail_cnt++; 1370 1371 if (fail_cnt == 2) 1372 break; 1373 } 1374 1375 /* fiddle with FIFO */ 1376 rw_mgr_incr_vfifo(grp, &v); 1377 } 1378 1379 if (v >= VFIFO_SIZE) { 1380 /* no failing read found!! 
Something must have gone wrong */ 1381 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n", 1382 __func__, __LINE__); 1383 return 0; 1384 } else { 1385 return v; 1386 } 1387 } 1388 1389 static int find_working_phase(uint32_t *grp, uint32_t *bit_chk, 1390 uint32_t dtaps_per_ptap, uint32_t *work_bgn, 1391 uint32_t *v, uint32_t *d, uint32_t *p, 1392 uint32_t *i, uint32_t *max_working_cnt) 1393 { 1394 uint32_t found_begin = 0; 1395 uint32_t tmp_delay = 0; 1396 uint32_t test_status; 1397 1398 for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay += 1399 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { 1400 *work_bgn = tmp_delay; 1401 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d); 1402 1403 for (*i = 0; *i < VFIFO_SIZE; (*i)++) { 1404 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn += 1405 IO_DELAY_PER_OPA_TAP) { 1406 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p); 1407 1408 test_status = 1409 rw_mgr_mem_calibrate_read_test_all_ranks 1410 (*grp, 1, PASS_ONE_BIT, bit_chk, 0); 1411 1412 if (test_status) { 1413 *max_working_cnt = 1; 1414 found_begin = 1; 1415 break; 1416 } 1417 } 1418 1419 if (found_begin) 1420 break; 1421 1422 if (*p > IO_DQS_EN_PHASE_MAX) 1423 /* fiddle with FIFO */ 1424 rw_mgr_incr_vfifo(*grp, v); 1425 } 1426 1427 if (found_begin) 1428 break; 1429 } 1430 1431 if (*i >= VFIFO_SIZE) { 1432 /* cannot find working solution */ 1433 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\ 1434 ptap/dtap\n", __func__, __LINE__); 1435 return 0; 1436 } else { 1437 return 1; 1438 } 1439 } 1440 1441 static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk, 1442 uint32_t *work_bgn, uint32_t *v, uint32_t *d, 1443 uint32_t *p, uint32_t *max_working_cnt) 1444 { 1445 uint32_t found_begin = 0; 1446 uint32_t tmp_delay; 1447 1448 /* Special case code for backing up a phase */ 1449 if (*p == 0) { 1450 *p = IO_DQS_EN_PHASE_MAX; 1451 rw_mgr_decr_vfifo(*grp, v); 1452 } else { 1453 (*p)--; 1454 } 1455 tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP; 1456 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p); 1457 1458 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; 1459 (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { 1460 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d); 1461 1462 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1, 1463 PASS_ONE_BIT, 1464 bit_chk, 0)) { 1465 found_begin = 1; 1466 *work_bgn = tmp_delay; 1467 break; 1468 } 1469 } 1470 1471 /* We have found a working dtap before the ptap found above */ 1472 if (found_begin == 1) 1473 (*max_working_cnt)++; 1474 1475 /* 1476 * Restore VFIFO to old state before we decremented it 1477 * (if needed). 
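	 * (p, and possibly the VFIFO pointer with it, was decremented at
	 * the top of this function; the increment below undoes that so
	 * later steps resume from the original phase.)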
1478 */ 1479 (*p)++; 1480 if (*p > IO_DQS_EN_PHASE_MAX) { 1481 *p = 0; 1482 rw_mgr_incr_vfifo(*grp, v); 1483 } 1484 1485 scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0); 1486 } 1487 1488 static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk, 1489 uint32_t *work_bgn, uint32_t *v, uint32_t *d, 1490 uint32_t *p, uint32_t *i, uint32_t *max_working_cnt, 1491 uint32_t *work_end) 1492 { 1493 uint32_t found_end = 0; 1494 1495 (*p)++; 1496 *work_end += IO_DELAY_PER_OPA_TAP; 1497 if (*p > IO_DQS_EN_PHASE_MAX) { 1498 /* fiddle with FIFO */ 1499 *p = 0; 1500 rw_mgr_incr_vfifo(*grp, v); 1501 } 1502 1503 for (; *i < VFIFO_SIZE + 1; (*i)++) { 1504 for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end 1505 += IO_DELAY_PER_OPA_TAP) { 1506 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p); 1507 1508 if (!rw_mgr_mem_calibrate_read_test_all_ranks 1509 (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) { 1510 found_end = 1; 1511 break; 1512 } else { 1513 (*max_working_cnt)++; 1514 } 1515 } 1516 1517 if (found_end) 1518 break; 1519 1520 if (*p > IO_DQS_EN_PHASE_MAX) { 1521 /* fiddle with FIFO */ 1522 rw_mgr_incr_vfifo(*grp, v); 1523 *p = 0; 1524 } 1525 } 1526 1527 if (*i >= VFIFO_SIZE + 1) { 1528 /* cannot see edge of failing read */ 1529 debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\ 1530 failed\n", __func__, __LINE__); 1531 return 0; 1532 } else { 1533 return 1; 1534 } 1535 } 1536 1537 static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk, 1538 uint32_t *work_bgn, uint32_t *v, uint32_t *d, 1539 uint32_t *p, uint32_t *work_mid, 1540 uint32_t *work_end) 1541 { 1542 int i; 1543 int tmp_delay = 0; 1544 1545 *work_mid = (*work_bgn + *work_end) / 2; 1546 1547 debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n", 1548 *work_bgn, *work_end, *work_mid); 1549 /* Get the middle delay to be less than a VFIFO delay */ 1550 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; 1551 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP) 1552 ; 1553 debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay); 1554 while (*work_mid > tmp_delay) 1555 *work_mid -= tmp_delay; 1556 debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid); 1557 1558 tmp_delay = 0; 1559 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid; 1560 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP) 1561 ; 1562 tmp_delay -= IO_DELAY_PER_OPA_TAP; 1563 debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay); 1564 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++, 1565 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) 1566 ; 1567 debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay); 1568 1569 scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1); 1570 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d); 1571 1572 /* 1573 * push vfifo until we can successfully calibrate. We can do this 1574 * because the largest possible margin in 1 VFIFO cycle. 
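	 * (The margin can never exceed one full VFIFO cycle, so at most
	 * VFIFO_SIZE increments are attempted below before giving up.)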
1575 */ 1576 for (i = 0; i < VFIFO_SIZE; i++) { 1577 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n", 1578 *v); 1579 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1, 1580 PASS_ONE_BIT, 1581 bit_chk, 0)) { 1582 break; 1583 } 1584 1585 /* fiddle with FIFO */ 1586 rw_mgr_incr_vfifo(*grp, v); 1587 } 1588 1589 if (i >= VFIFO_SIZE) { 1590 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \ 1591 failed\n", __func__, __LINE__); 1592 return 0; 1593 } else { 1594 return 1; 1595 } 1596 } 1597 1598 /* find a good dqs enable to use */ 1599 static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) 1600 { 1601 uint32_t v, d, p, i; 1602 uint32_t max_working_cnt; 1603 uint32_t bit_chk; 1604 uint32_t dtaps_per_ptap; 1605 uint32_t work_bgn, work_mid, work_end; 1606 uint32_t found_passing_read, found_failing_read, initial_failing_dtap; 1607 1608 debug("%s:%d %u\n", __func__, __LINE__, grp); 1609 1610 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 1611 1612 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); 1613 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); 1614 1615 /* ************************************************************** */ 1616 /* * Step 0 : Determine number of delay taps for each phase tap * */ 1617 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 1618 1619 /* ********************************************************* */ 1620 /* * Step 1 : First push vfifo until we get a failing read * */ 1621 v = find_vfifo_read(grp, &bit_chk); 1622 1623 max_working_cnt = 0; 1624 1625 /* ******************************************************** */ 1626 /* * step 2: find first working phase, increment in ptaps * */ 1627 work_bgn = 0; 1628 if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d, 1629 &p, &i, &max_working_cnt) == 0) 1630 return 0; 1631 1632 work_end = work_bgn; 1633 1634 /* 1635 * If d is 0 then the working window covers a phase tap and 1636 * we can follow the old procedure otherwise, we've found the beginning, 1637 * and we need to increment the dtaps until we find the end. 
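	 * In other words: d == 0 means the first working setting sat on a
	 * ptap boundary (steps 3a-5a below), while d != 0 means it was found
	 * part-way through the dtap sweep (steps 3-5b below).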
1638 */ 1639 if (d == 0) { 1640 /* ********************************************************* */ 1641 /* * step 3a: if we have room, back off by one and 1642 increment in dtaps * */ 1643 1644 sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p, 1645 &max_working_cnt); 1646 1647 /* ********************************************************* */ 1648 /* * step 4a: go forward from working phase to non working 1649 phase, increment in ptaps * */ 1650 if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p, 1651 &i, &max_working_cnt, &work_end) == 0) 1652 return 0; 1653 1654 /* ********************************************************* */ 1655 /* * step 5a: back off one from last, increment in dtaps * */ 1656 1657 /* Special case code for backing up a phase */ 1658 if (p == 0) { 1659 p = IO_DQS_EN_PHASE_MAX; 1660 rw_mgr_decr_vfifo(grp, &v); 1661 } else { 1662 p = p - 1; 1663 } 1664 1665 work_end -= IO_DELAY_PER_OPA_TAP; 1666 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1667 1668 /* * The actual increment of dtaps is done outside of 1669 the if/else loop to share code */ 1670 d = 0; 1671 1672 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \ 1673 vfifo=%u ptap=%u\n", __func__, __LINE__, 1674 v, p); 1675 } else { 1676 /* ******************************************************* */ 1677 /* * step 3-5b: Find the right edge of the window using 1678 delay taps * */ 1679 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \ 1680 ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__, 1681 v, p, d, work_bgn); 1682 1683 work_end = work_bgn; 1684 1685 /* * The actual increment of dtaps is done outside of the 1686 if/else loop to share code */ 1687 1688 /* Only here to counterbalance a subtract later on which is 1689 not needed if this branch of the algorithm is taken */ 1690 max_working_cnt++; 1691 } 1692 1693 /* The dtap increment to find the failing edge is done here */ 1694 for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end += 1695 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { 1696 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \ 1697 end-2: dtap=%u\n", __func__, __LINE__, d); 1698 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1699 1700 if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1701 PASS_ONE_BIT, 1702 &bit_chk, 0)) { 1703 break; 1704 } 1705 } 1706 1707 /* Go back to working dtap */ 1708 if (d != 0) 1709 work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 1710 1711 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \ 1712 ptap=%u dtap=%u end=%u\n", __func__, __LINE__, 1713 v, p, d-1, work_end); 1714 1715 if (work_end < work_bgn) { 1716 /* nil range */ 1717 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \ 1718 failed\n", __func__, __LINE__); 1719 return 0; 1720 } 1721 1722 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n", 1723 __func__, __LINE__, work_bgn, work_end); 1724 1725 /* *************************************************************** */ 1726 /* 1727 * * We need to calculate the number of dtaps that equal a ptap 1728 * * To do that we'll back up a ptap and re-find the edge of the 1729 * * window using dtaps 1730 */ 1731 1732 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \ 1733 for tracking\n", __func__, __LINE__); 1734 1735 /* Special case code for backing up a phase */ 1736 if (p == 0) { 1737 p = IO_DQS_EN_PHASE_MAX; 1738 rw_mgr_decr_vfifo(grp, &v); 1739 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \ 1740 cycle/phase: v=%u p=%u\n", __func__, __LINE__, 1741 v, p); 1742 } else { 1743 p = p - 1; 1744 
debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \ 1745 phase only: v=%u p=%u", __func__, __LINE__, 1746 v, p); 1747 } 1748 1749 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1750 1751 /* 1752 * Increase dtap until we first see a passing read (in case the 1753 * window is smaller than a ptap), 1754 * and then a failing read to mark the edge of the window again 1755 */ 1756 1757 /* Find a passing read */ 1758 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n", 1759 __func__, __LINE__); 1760 found_passing_read = 0; 1761 found_failing_read = 0; 1762 initial_failing_dtap = d; 1763 for (; d <= IO_DQS_EN_DELAY_MAX; d++) { 1764 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \ 1765 read d=%u\n", __func__, __LINE__, d); 1766 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1767 1768 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1769 PASS_ONE_BIT, 1770 &bit_chk, 0)) { 1771 found_passing_read = 1; 1772 break; 1773 } 1774 } 1775 1776 if (found_passing_read) { 1777 /* Find a failing read */ 1778 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \ 1779 read\n", __func__, __LINE__); 1780 for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) { 1781 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \ 1782 testing read d=%u\n", __func__, __LINE__, d); 1783 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1784 1785 if (!rw_mgr_mem_calibrate_read_test_all_ranks 1786 (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { 1787 found_failing_read = 1; 1788 break; 1789 } 1790 } 1791 } else { 1792 debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \ 1793 calculate dtaps", __func__, __LINE__); 1794 debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n"); 1795 } 1796 1797 /* 1798 * The dynamically calculated dtaps_per_ptap is only valid if we 1799 * found a passing/failing read. If we didn't, it means d hit the max 1800 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its 1801 * statically calculated value. 
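	 * As a purely illustrative example: if the previous sweep stopped at
	 * failing dtap 2 (initial_failing_dtap) and, after backing up one
	 * phase, a passing read shows up at dtap 5 and the next failing read
	 * at dtap 9, then dtaps_per_ptap below becomes 9 - 2 = 7.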
 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
		   - %u = %u", __func__, __LINE__, d,
		   initial_failing_dtap, dtaps_per_ptap);

	/* ******************************************** */
	/* * step 6:  Find the centre of the window   * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
		   vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
		   v, p-1, d);
	return 1;
}

/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i;
	uint32_t p;
	uint32_t d;
	uint32_t r;

	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
		(RW_MGR_MEM_DQ_PER_READ_DQS-1);
	/* we start at zero, so have one less dq to divide among */

	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
	      test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
				   vfifo_find_dqs_", __func__, __LINE__);
			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
				   write_group, read_group);
			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
			scc_mgr_set_dq_in_delay(write_group, p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
		   en_phase_sweep_dq", __func__, __LINE__);
	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
		   chain to zero\n", write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(write_group, p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}

/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
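	 * (The left_edge/right_edge arrays and the mid/margin values
	 * declared below are int32_t and can legitimately go negative
	 * during the search.)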
1890 */ 1891 uint32_t bit_chk; 1892 uint32_t sticky_bit_chk; 1893 int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; 1894 int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; 1895 int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS]; 1896 int32_t mid; 1897 int32_t orig_mid_min, mid_min; 1898 int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs, 1899 final_dqs_en; 1900 int32_t dq_margin, dqs_margin; 1901 uint32_t stop; 1902 uint32_t temp_dq_in_delay1, temp_dq_in_delay2; 1903 uint32_t addr; 1904 1905 debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn); 1906 1907 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET; 1908 start_dqs = readl(addr + (read_group << 2)); 1909 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) 1910 start_dqs_en = readl(addr + ((read_group << 2) 1911 - IO_DQS_EN_DELAY_OFFSET)); 1912 1913 /* set the left and right edge of each bit to an illegal value */ 1914 /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */ 1915 sticky_bit_chk = 0; 1916 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 1917 left_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1918 right_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1919 } 1920 1921 /* Search for the left edge of the window for each bit */ 1922 for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) { 1923 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d); 1924 1925 writel(0, &sdr_scc_mgr->update); 1926 1927 /* 1928 * Stop searching when the read test doesn't pass AND when 1929 * we've seen a passing read on every bit. 1930 */ 1931 if (use_read_test) { 1932 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn, 1933 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT, 1934 &bit_chk, 0, 0); 1935 } else { 1936 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1937 0, PASS_ONE_BIT, 1938 &bit_chk, 0); 1939 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * 1940 (read_group - (write_group * 1941 RW_MGR_MEM_IF_READ_DQS_WIDTH / 1942 RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); 1943 stop = (bit_chk == 0); 1944 } 1945 sticky_bit_chk = sticky_bit_chk | bit_chk; 1946 stop = stop && (sticky_bit_chk == param->read_correct_mask); 1947 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \ 1948 && %u", __func__, __LINE__, d, 1949 sticky_bit_chk, 1950 param->read_correct_mask, stop); 1951 1952 if (stop == 1) { 1953 break; 1954 } else { 1955 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 1956 if (bit_chk & 1) { 1957 /* Remember a passing test as the 1958 left_edge */ 1959 left_edge[i] = d; 1960 } else { 1961 /* If a left edge has not been seen yet, 1962 then a future passing test will mark 1963 this edge as the right edge */ 1964 if (left_edge[i] == 1965 IO_IO_IN_DELAY_MAX + 1) { 1966 right_edge[i] = -(d + 1); 1967 } 1968 } 1969 bit_chk = bit_chk >> 1; 1970 } 1971 } 1972 } 1973 1974 /* Reset DQ delay chains to 0 */ 1975 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0); 1976 sticky_bit_chk = 0; 1977 for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) { 1978 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \ 1979 %d right_edge[%u]: %d\n", __func__, __LINE__, 1980 i, left_edge[i], i, right_edge[i]); 1981 1982 /* 1983 * Check for cases where we haven't found the left edge, 1984 * which makes our assignment of the the right edge invalid. 1985 * Reset it to the illegal value. 
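 *
 * (The illegal value is IO_IO_IN_DELAY_MAX + 1, so the right-edge
 * search below still treats this bit as having no edge found yet.)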
1986 */ 1987 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && ( 1988 right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { 1989 right_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1990 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \ 1991 right_edge[%u]: %d\n", __func__, __LINE__, 1992 i, right_edge[i]); 1993 } 1994 1995 /* 1996 * Reset sticky bit (except for bits where we have seen 1997 * both the left and right edge). 1998 */ 1999 sticky_bit_chk = sticky_bit_chk << 1; 2000 if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) && 2001 (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { 2002 sticky_bit_chk = sticky_bit_chk | 1; 2003 } 2004 2005 if (i == 0) 2006 break; 2007 } 2008 2009 /* Search for the right edge of the window for each bit */ 2010 for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) { 2011 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); 2012 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2013 uint32_t delay = d + start_dqs_en; 2014 if (delay > IO_DQS_EN_DELAY_MAX) 2015 delay = IO_DQS_EN_DELAY_MAX; 2016 scc_mgr_set_dqs_en_delay(read_group, delay); 2017 } 2018 scc_mgr_load_dqs(read_group); 2019 2020 writel(0, &sdr_scc_mgr->update); 2021 2022 /* 2023 * Stop searching when the read test doesn't pass AND when 2024 * we've seen a passing read on every bit. 2025 */ 2026 if (use_read_test) { 2027 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn, 2028 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT, 2029 &bit_chk, 0, 0); 2030 } else { 2031 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2032 0, PASS_ONE_BIT, 2033 &bit_chk, 0); 2034 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * 2035 (read_group - (write_group * 2036 RW_MGR_MEM_IF_READ_DQS_WIDTH / 2037 RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); 2038 stop = (bit_chk == 0); 2039 } 2040 sticky_bit_chk = sticky_bit_chk | bit_chk; 2041 stop = stop && (sticky_bit_chk == param->read_correct_mask); 2042 2043 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \ 2044 %u && %u", __func__, __LINE__, d, 2045 sticky_bit_chk, param->read_correct_mask, stop); 2046 2047 if (stop == 1) { 2048 break; 2049 } else { 2050 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2051 if (bit_chk & 1) { 2052 /* Remember a passing test as 2053 the right_edge */ 2054 right_edge[i] = d; 2055 } else { 2056 if (d != 0) { 2057 /* If a right edge has not been 2058 seen yet, then a future passing 2059 test will mark this edge as the 2060 left edge */ 2061 if (right_edge[i] == 2062 IO_IO_IN_DELAY_MAX + 1) { 2063 left_edge[i] = -(d + 1); 2064 } 2065 } else { 2066 /* d = 0 failed, but it passed 2067 when testing the left edge, 2068 so it must be marginal, 2069 set it to -1 */ 2070 if (right_edge[i] == 2071 IO_IO_IN_DELAY_MAX + 1 && 2072 left_edge[i] != 2073 IO_IO_IN_DELAY_MAX 2074 + 1) { 2075 right_edge[i] = -1; 2076 } 2077 /* If a right edge has not been 2078 seen yet, then a future passing 2079 test will mark this edge as the 2080 left edge */ 2081 else if (right_edge[i] == 2082 IO_IO_IN_DELAY_MAX + 2083 1) { 2084 left_edge[i] = -(d + 1); 2085 } 2086 } 2087 } 2088 2089 debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\ 2090 d=%u]: ", __func__, __LINE__, d); 2091 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ", 2092 (int)(bit_chk & 1), i, left_edge[i]); 2093 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2094 right_edge[i]); 2095 bit_chk = bit_chk >> 1; 2096 } 2097 } 2098 } 2099 2100 /* Check that all bits have a window */ 2101 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2102 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \ 2103 %d right_edge[%u]: %d", 
__func__, __LINE__, 2104 i, left_edge[i], i, right_edge[i]); 2105 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i] 2106 == IO_IO_IN_DELAY_MAX + 1)) { 2107 /* 2108 * Restore delay chain settings before letting the loop 2109 * in rw_mgr_mem_calibrate_vfifo to retry different 2110 * dqs/ck relationships. 2111 */ 2112 scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs); 2113 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2114 scc_mgr_set_dqs_en_delay(read_group, 2115 start_dqs_en); 2116 } 2117 scc_mgr_load_dqs(read_group); 2118 writel(0, &sdr_scc_mgr->update); 2119 2120 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \ 2121 find edge [%u]: %d %d", __func__, __LINE__, 2122 i, left_edge[i], right_edge[i]); 2123 if (use_read_test) { 2124 set_failing_group_stage(read_group * 2125 RW_MGR_MEM_DQ_PER_READ_DQS + i, 2126 CAL_STAGE_VFIFO, 2127 CAL_SUBSTAGE_VFIFO_CENTER); 2128 } else { 2129 set_failing_group_stage(read_group * 2130 RW_MGR_MEM_DQ_PER_READ_DQS + i, 2131 CAL_STAGE_VFIFO_AFTER_WRITES, 2132 CAL_SUBSTAGE_VFIFO_CENTER); 2133 } 2134 return 0; 2135 } 2136 } 2137 2138 /* Find middle of window for each DQ bit */ 2139 mid_min = left_edge[0] - right_edge[0]; 2140 min_index = 0; 2141 for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2142 mid = left_edge[i] - right_edge[i]; 2143 if (mid < mid_min) { 2144 mid_min = mid; 2145 min_index = i; 2146 } 2147 } 2148 2149 /* 2150 * -mid_min/2 represents the amount that we need to move DQS. 2151 * If mid_min is odd and positive we'll need to add one to 2152 * make sure the rounding in further calculations is correct 2153 * (always bias to the right), so just add 1 for all positive values. 2154 */ 2155 if (mid_min > 0) 2156 mid_min++; 2157 2158 mid_min = mid_min / 2; 2159 2160 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n", 2161 __func__, __LINE__, mid_min, min_index); 2162 2163 /* Determine the amount we can change DQS (which is -mid_min) */ 2164 orig_mid_min = mid_min; 2165 new_dqs = start_dqs - mid_min; 2166 if (new_dqs > IO_DQS_IN_DELAY_MAX) 2167 new_dqs = IO_DQS_IN_DELAY_MAX; 2168 else if (new_dqs < 0) 2169 new_dqs = 0; 2170 2171 mid_min = start_dqs - new_dqs; 2172 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n", 2173 mid_min, new_dqs); 2174 2175 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2176 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX) 2177 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX; 2178 else if (start_dqs_en - mid_min < 0) 2179 mid_min += start_dqs_en - mid_min; 2180 } 2181 new_dqs = start_dqs - mid_min; 2182 2183 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \ 2184 new_dqs=%d mid_min=%d\n", start_dqs, 2185 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? 
start_dqs_en : -1, 2186 new_dqs, mid_min); 2187 2188 /* Initialize data for export structures */ 2189 dqs_margin = IO_IO_IN_DELAY_MAX + 1; 2190 dq_margin = IO_IO_IN_DELAY_MAX + 1; 2191 2192 /* add delay to bring centre of all DQ windows to the same "level" */ 2193 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { 2194 /* Use values before divide by 2 to reduce round off error */ 2195 shift_dq = (left_edge[i] - right_edge[i] - 2196 (left_edge[min_index] - right_edge[min_index]))/2 + 2197 (orig_mid_min - mid_min); 2198 2199 debug_cond(DLEVEL == 2, "vfifo_center: before: \ 2200 shift_dq[%u]=%d\n", i, shift_dq); 2201 2202 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET; 2203 temp_dq_in_delay1 = readl(addr + (p << 2)); 2204 temp_dq_in_delay2 = readl(addr + (i << 2)); 2205 2206 if (shift_dq + (int32_t)temp_dq_in_delay1 > 2207 (int32_t)IO_IO_IN_DELAY_MAX) { 2208 shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2; 2209 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) { 2210 shift_dq = -(int32_t)temp_dq_in_delay1; 2211 } 2212 debug_cond(DLEVEL == 2, "vfifo_center: after: \ 2213 shift_dq[%u]=%d\n", i, shift_dq); 2214 final_dq[i] = temp_dq_in_delay1 + shift_dq; 2215 scc_mgr_set_dq_in_delay(write_group, p, final_dq[i]); 2216 scc_mgr_load_dq(p); 2217 2218 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i, 2219 left_edge[i] - shift_dq + (-mid_min), 2220 right_edge[i] + shift_dq - (-mid_min)); 2221 /* To determine values for export structures */ 2222 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) 2223 dq_margin = left_edge[i] - shift_dq + (-mid_min); 2224 2225 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) 2226 dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2227 } 2228 2229 final_dqs = new_dqs; 2230 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) 2231 final_dqs_en = start_dqs_en - mid_min; 2232 2233 /* Move DQS-en */ 2234 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2235 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en); 2236 scc_mgr_load_dqs(read_group); 2237 } 2238 2239 /* Move DQS */ 2240 scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs); 2241 scc_mgr_load_dqs(read_group); 2242 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \ 2243 dqs_margin=%d", __func__, __LINE__, 2244 dq_margin, dqs_margin); 2245 2246 /* 2247 * Do not remove this line as it makes sure all of our decisions 2248 * have been applied. Apply the update bit. 2249 */ 2250 writel(0, &sdr_scc_mgr->update); 2251 2252 return (dq_margin >= 0) && (dqs_margin >= 0); 2253 } 2254 2255 /* 2256 * calibrate the read valid prediction FIFO. 2257 * 2258 * - read valid prediction will consist of finding a good DQS enable phase, 2259 * DQS enable delay, DQS input phase, and DQS input delay. 2260 * - we also do a per-bit deskew on the DQ lines. 
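 *
 * Rough flow of the function below: for each candidate dqdqs output
 * phase (and, when needed, additional output-side delay) we load the
 * read patterns, optionally run the guaranteed read test, sweep the
 * DQS enable phase/delay, and finally run per-bit read centering for
 * every shadow register set.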
2261 */ 2262 static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group, 2263 uint32_t test_bgn) 2264 { 2265 uint32_t p, d, rank_bgn, sr; 2266 uint32_t dtaps_per_ptap; 2267 uint32_t tmp_delay; 2268 uint32_t bit_chk; 2269 uint32_t grp_calibrated; 2270 uint32_t write_group, write_test_bgn; 2271 uint32_t failed_substage; 2272 2273 debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn); 2274 2275 /* update info for sims */ 2276 reg_file_set_stage(CAL_STAGE_VFIFO); 2277 2278 write_group = read_group; 2279 write_test_bgn = test_bgn; 2280 2281 /* USER Determine number of delay taps for each phase tap */ 2282 dtaps_per_ptap = 0; 2283 tmp_delay = 0; 2284 while (tmp_delay < IO_DELAY_PER_OPA_TAP) { 2285 dtaps_per_ptap++; 2286 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 2287 } 2288 dtaps_per_ptap--; 2289 tmp_delay = 0; 2290 2291 /* update info for sims */ 2292 reg_file_set_group(read_group); 2293 2294 grp_calibrated = 0; 2295 2296 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); 2297 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; 2298 2299 for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) { 2300 /* 2301 * In RLDRAMX we may be messing the delay of pins in 2302 * the same write group but outside of the current read 2303 * the group, but that's ok because we haven't 2304 * calibrated output side yet. 2305 */ 2306 if (d > 0) { 2307 scc_mgr_apply_group_all_out_delay_add_all_ranks 2308 (write_group, write_test_bgn, d); 2309 } 2310 2311 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; 2312 p++) { 2313 /* set a particular dqdqs phase */ 2314 scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p); 2315 2316 debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \ 2317 p=%u d=%u\n", __func__, __LINE__, 2318 read_group, p, d); 2319 2320 /* 2321 * Load up the patterns used by read calibration 2322 * using current DQDQS phase. 2323 */ 2324 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2325 if (!(gbl->phy_debug_mode_flags & 2326 PHY_DEBUG_DISABLE_GUARANTEED_READ)) { 2327 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks 2328 (read_group, 1, &bit_chk)) { 2329 debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:", 2330 __func__, __LINE__); 2331 debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n", 2332 read_group, p, d); 2333 break; 2334 } 2335 } 2336 2337 /* case:56390 */ 2338 grp_calibrated = 1; 2339 if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay 2340 (write_group, read_group, test_bgn)) { 2341 /* 2342 * USER Read per-bit deskew can be done on a 2343 * per shadow register basis. 2344 */ 2345 for (rank_bgn = 0, sr = 0; 2346 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2347 rank_bgn += NUM_RANKS_PER_SHADOW_REG, 2348 ++sr) { 2349 /* 2350 * Determine if this set of ranks 2351 * should be skipped entirely. 2352 */ 2353 if (!param->skip_shadow_regs[sr]) { 2354 /* 2355 * If doing read after write 2356 * calibration, do not update 2357 * FOM, now - do it then. 
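 *
 * (update_fom is therefore passed as 0 in the call below;
 * rw_mgr_mem_calibrate_vfifo_end() repeats the centering
 * later with update_fom = 1.)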
2358 */ 2359 if (!rw_mgr_mem_calibrate_vfifo_center 2360 (rank_bgn, write_group, 2361 read_group, test_bgn, 1, 0)) { 2362 grp_calibrated = 0; 2363 failed_substage = 2364 CAL_SUBSTAGE_VFIFO_CENTER; 2365 } 2366 } 2367 } 2368 } else { 2369 grp_calibrated = 0; 2370 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; 2371 } 2372 } 2373 } 2374 2375 if (grp_calibrated == 0) { 2376 set_failing_group_stage(write_group, CAL_STAGE_VFIFO, 2377 failed_substage); 2378 return 0; 2379 } 2380 2381 /* 2382 * Reset the delay chains back to zero if they have moved > 1 2383 * (check for > 1 because loop will increase d even when pass in 2384 * first case). 2385 */ 2386 if (d > 2) 2387 scc_mgr_zero_group(write_group, write_test_bgn, 1); 2388 2389 return 1; 2390 } 2391 2392 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */ 2393 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group, 2394 uint32_t test_bgn) 2395 { 2396 uint32_t rank_bgn, sr; 2397 uint32_t grp_calibrated; 2398 uint32_t write_group; 2399 2400 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn); 2401 2402 /* update info for sims */ 2403 2404 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); 2405 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 2406 2407 write_group = read_group; 2408 2409 /* update info for sims */ 2410 reg_file_set_group(read_group); 2411 2412 grp_calibrated = 1; 2413 /* Read per-bit deskew can be done on a per shadow register basis */ 2414 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2415 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { 2416 /* Determine if this set of ranks should be skipped entirely */ 2417 if (!param->skip_shadow_regs[sr]) { 2418 /* This is the last calibration round, update FOM here */ 2419 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn, 2420 write_group, 2421 read_group, 2422 test_bgn, 0, 2423 1)) { 2424 grp_calibrated = 0; 2425 } 2426 } 2427 } 2428 2429 2430 if (grp_calibrated == 0) { 2431 set_failing_group_stage(write_group, 2432 CAL_STAGE_VFIFO_AFTER_WRITES, 2433 CAL_SUBSTAGE_VFIFO_CENTER); 2434 return 0; 2435 } 2436 2437 return 1; 2438 } 2439 2440 /* Calibrate LFIFO to find smallest read latency */ 2441 static uint32_t rw_mgr_mem_calibrate_lfifo(void) 2442 { 2443 uint32_t found_one; 2444 uint32_t bit_chk; 2445 2446 debug("%s:%d\n", __func__, __LINE__); 2447 2448 /* update info for sims */ 2449 reg_file_set_stage(CAL_STAGE_LFIFO); 2450 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); 2451 2452 /* Load up the patterns used by read calibration for all ranks */ 2453 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2454 found_one = 0; 2455 2456 do { 2457 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2458 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u", 2459 __func__, __LINE__, gbl->curr_read_lat); 2460 2461 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, 2462 NUM_READ_TESTS, 2463 PASS_ALL_BITS, 2464 &bit_chk, 1)) { 2465 break; 2466 } 2467 2468 found_one = 1; 2469 /* reduce read latency and see if things are working */ 2470 /* correctly */ 2471 gbl->curr_read_lat--; 2472 } while (gbl->curr_read_lat > 0); 2473 2474 /* reset the fifos to get pointers to known state */ 2475 2476 writel(0, &phy_mgr_cmd->fifo_reset); 2477 2478 if (found_one) { 2479 /* add a fudge factor to the read latency that was determined */ 2480 gbl->curr_read_lat += 2; 2481 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2482 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \ 2483 read_lat=%u\n", __func__, __LINE__, 2484 gbl->curr_read_lat); 2485 return 1; 2486 } else { 
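		/*
		 * Not even the initial (largest) read latency passed the
		 * read test, so record the LFIFO stage as the failure point.
		 */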
2487 set_failing_group_stage(0xff, CAL_STAGE_LFIFO, 2488 CAL_SUBSTAGE_READ_LATENCY); 2489 2490 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \ 2491 read_lat=%u\n", __func__, __LINE__, 2492 gbl->curr_read_lat); 2493 return 0; 2494 } 2495 } 2496 2497 /* 2498 * issue write test command. 2499 * two variants are provided. one that just tests a write pattern and 2500 * another that tests datamask functionality. 2501 */ 2502 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group, 2503 uint32_t test_dm) 2504 { 2505 uint32_t mcc_instruction; 2506 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) && 2507 ENABLE_SUPER_QUICK_CALIBRATION); 2508 uint32_t rw_wl_nop_cycles; 2509 uint32_t addr; 2510 2511 /* 2512 * Set counter and jump addresses for the right 2513 * number of NOP cycles. 2514 * The number of supported NOP cycles can range from -1 to infinity 2515 * Three different cases are handled: 2516 * 2517 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping 2518 * mechanism will be used to insert the right number of NOPs 2519 * 2520 * 2. For a number of NOP cycles equals to 0, the micro-instruction 2521 * issuing the write command will jump straight to the 2522 * micro-instruction that turns on DQS (for DDRx), or outputs write 2523 * data (for RLD), skipping 2524 * the NOP micro-instruction all together 2525 * 2526 * 3. A number of NOP cycles equal to -1 indicates that DQS must be 2527 * turned on in the same micro-instruction that issues the write 2528 * command. Then we need 2529 * to directly jump to the micro-instruction that sends out the data 2530 * 2531 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters 2532 * (2 and 3). One jump-counter (0) is used to perform multiple 2533 * write-read operations. 2534 * one counter left to issue this command in "multiple-group" mode 2535 */ 2536 2537 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; 2538 2539 if (rw_wl_nop_cycles == -1) { 2540 /* 2541 * CNTR 2 - We want to execute the special write operation that 2542 * turns on DQS right away and then skip directly to the 2543 * instruction that sends out the data. We set the counter to a 2544 * large number so that the jump is always taken. 2545 */ 2546 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2547 2548 /* CNTR 3 - Not used */ 2549 if (test_dm) { 2550 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1; 2551 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA, 2552 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2553 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2554 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2555 } else { 2556 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1; 2557 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA, 2558 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2559 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2560 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2561 } 2562 } else if (rw_wl_nop_cycles == 0) { 2563 /* 2564 * CNTR 2 - We want to skip the NOP operation and go straight 2565 * to the DQS enable instruction. We set the counter to a large 2566 * number so that the jump is always taken. 
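 *
 * (0xFF is written to load_cntr2 below, so the jump to the address
 * in load_jump_add2 is always taken.)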
2567 */ 2568 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2569 2570 /* CNTR 3 - Not used */ 2571 if (test_dm) { 2572 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2573 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS, 2574 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2575 } else { 2576 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2577 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS, 2578 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2579 } 2580 } else { 2581 /* 2582 * CNTR 2 - In this case we want to execute the next instruction 2583 * and NOT take the jump. So we set the counter to 0. The jump 2584 * address doesn't count. 2585 */ 2586 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2); 2587 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2588 2589 /* 2590 * CNTR 3 - Set the nop counter to the number of cycles we 2591 * need to loop for, minus 1. 2592 */ 2593 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); 2594 if (test_dm) { 2595 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2596 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2597 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2598 } else { 2599 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2600 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2601 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2602 } 2603 } 2604 2605 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 2606 RW_MGR_RESET_READ_DATAPATH_OFFSET); 2607 2608 if (quick_write_mode) 2609 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0); 2610 else 2611 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0); 2612 2613 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 2614 2615 /* 2616 * CNTR 1 - This is used to ensure enough time elapses 2617 * for read data to come back. 2618 */ 2619 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); 2620 2621 if (test_dm) { 2622 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT, 2623 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2624 } else { 2625 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT, 2626 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2627 } 2628 2629 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 2630 writel(mcc_instruction, addr + (group << 2)); 2631 } 2632 2633 /* Test writes, can check for a single bit pass or multiple bit pass */ 2634 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, 2635 uint32_t write_group, uint32_t use_dm, uint32_t all_correct, 2636 uint32_t *bit_chk, uint32_t all_ranks) 2637 { 2638 uint32_t r; 2639 uint32_t correct_mask_vg; 2640 uint32_t tmp_bit_chk; 2641 uint32_t vg; 2642 uint32_t rank_end = all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : 2643 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 2644 uint32_t addr_rw_mgr; 2645 uint32_t base_rw_mgr; 2646 2647 *bit_chk = param->write_correct_mask; 2648 correct_mask_vg = param->write_correct_mask_vg; 2649 2650 for (r = rank_bgn; r < rank_end; r++) { 2651 if (param->skip_ranks[r]) { 2652 /* request to skip the rank */ 2653 continue; 2654 } 2655 2656 /* set rank */ 2657 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 2658 2659 tmp_bit_chk = 0; 2660 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS; 2661 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) { 2662 /* reset the fifos to get pointers to known state */ 2663 writel(0, &phy_mgr_cmd->fifo_reset); 2664 2665 tmp_bit_chk = tmp_bit_chk << 2666 (RW_MGR_MEM_DQ_PER_WRITE_DQS / 2667 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 2668 rw_mgr_mem_calibrate_write_test_issue(write_group * 2669 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg, 2670 use_dm); 2671 2672 base_rw_mgr = readl(addr_rw_mgr); 2673 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr)); 2674 if (vg == 0) 2675 break; 2676 } 2677 *bit_chk &= tmp_bit_chk; 2678 } 2679 2680 if (all_correct) { 2681 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2682 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \ 2683 %u => %lu", write_group, use_dm, 2684 *bit_chk, param->write_correct_mask, 2685 (long unsigned int)(*bit_chk == 2686 param->write_correct_mask)); 2687 return *bit_chk == param->write_correct_mask; 2688 } else { 2689 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2690 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ", 2691 write_group, use_dm, *bit_chk); 2692 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0, 2693 (long unsigned int)(*bit_chk != 0)); 2694 return *bit_chk != 0x00; 2695 } 2696 } 2697 2698 /* 2699 * center all windows. do per-bit-deskew to possibly increase size of 2700 * certain windows. 2701 */ 2702 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, 2703 uint32_t write_group, uint32_t test_bgn) 2704 { 2705 uint32_t i, p, min_index; 2706 int32_t d; 2707 /* 2708 * Store these as signed since there are comparisons with 2709 * signed numbers. 2710 */ 2711 uint32_t bit_chk; 2712 uint32_t sticky_bit_chk; 2713 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2714 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2715 int32_t mid; 2716 int32_t mid_min, orig_mid_min; 2717 int32_t new_dqs, start_dqs, shift_dq; 2718 int32_t dq_margin, dqs_margin, dm_margin; 2719 uint32_t stop; 2720 uint32_t temp_dq_out1_delay; 2721 uint32_t addr; 2722 2723 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn); 2724 2725 dm_margin = 0; 2726 2727 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2728 start_dqs = readl(addr + 2729 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2)); 2730 2731 /* per-bit deskew */ 2732 2733 /* 2734 * set the left and right edge of each bit to an illegal value 2735 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value. 2736 */ 2737 sticky_bit_chk = 0; 2738 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2739 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2740 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2741 } 2742 2743 /* Search for the left edge of the window for each bit */ 2744 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) { 2745 scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d); 2746 2747 writel(0, &sdr_scc_mgr->update); 2748 2749 /* 2750 * Stop searching when the read test doesn't pass AND when 2751 * we've seen a passing read on every bit. 
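 *
 * sticky_bit_chk accumulates every bit that has passed at least once,
 * so the search only stops once the current test fails AND every bit
 * in write_correct_mask has passed at some delay setting.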
2752 */ 2753 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2754 0, PASS_ONE_BIT, &bit_chk, 0); 2755 sticky_bit_chk = sticky_bit_chk | bit_chk; 2756 stop = stop && (sticky_bit_chk == param->write_correct_mask); 2757 debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \ 2758 == %u && %u [bit_chk= %u ]\n", 2759 d, sticky_bit_chk, param->write_correct_mask, 2760 stop, bit_chk); 2761 2762 if (stop == 1) { 2763 break; 2764 } else { 2765 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2766 if (bit_chk & 1) { 2767 /* 2768 * Remember a passing test as the 2769 * left_edge. 2770 */ 2771 left_edge[i] = d; 2772 } else { 2773 /* 2774 * If a left edge has not been seen 2775 * yet, then a future passing test will 2776 * mark this edge as the right edge. 2777 */ 2778 if (left_edge[i] == 2779 IO_IO_OUT1_DELAY_MAX + 1) { 2780 right_edge[i] = -(d + 1); 2781 } 2782 } 2783 debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d); 2784 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d", 2785 (int)(bit_chk & 1), i, left_edge[i]); 2786 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2787 right_edge[i]); 2788 bit_chk = bit_chk >> 1; 2789 } 2790 } 2791 } 2792 2793 /* Reset DQ delay chains to 0 */ 2794 scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0); 2795 sticky_bit_chk = 0; 2796 for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) { 2797 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \ 2798 %d right_edge[%u]: %d\n", __func__, __LINE__, 2799 i, left_edge[i], i, right_edge[i]); 2800 2801 /* 2802 * Check for cases where we haven't found the left edge, 2803 * which makes our assignment of the the right edge invalid. 2804 * Reset it to the illegal value. 2805 */ 2806 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) && 2807 (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { 2808 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2809 debug_cond(DLEVEL == 2, "%s:%d write_center: reset \ 2810 right_edge[%u]: %d\n", __func__, __LINE__, 2811 i, right_edge[i]); 2812 } 2813 2814 /* 2815 * Reset sticky bit (except for bits where we have 2816 * seen the left edge). 2817 */ 2818 sticky_bit_chk = sticky_bit_chk << 1; 2819 if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) 2820 sticky_bit_chk = sticky_bit_chk | 1; 2821 2822 if (i == 0) 2823 break; 2824 } 2825 2826 /* Search for the right edge of the window for each bit */ 2827 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) { 2828 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 2829 d + start_dqs); 2830 2831 writel(0, &sdr_scc_mgr->update); 2832 2833 /* 2834 * Stop searching when the read test doesn't pass AND when 2835 * we've seen a passing read on every bit. 
2836 */ 2837 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2838 0, PASS_ONE_BIT, &bit_chk, 0); 2839 2840 sticky_bit_chk = sticky_bit_chk | bit_chk; 2841 stop = stop && (sticky_bit_chk == param->write_correct_mask); 2842 2843 debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \ 2844 %u && %u\n", d, sticky_bit_chk, 2845 param->write_correct_mask, stop); 2846 2847 if (stop == 1) { 2848 if (d == 0) { 2849 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; 2850 i++) { 2851 /* d = 0 failed, but it passed when 2852 testing the left edge, so it must be 2853 marginal, set it to -1 */ 2854 if (right_edge[i] == 2855 IO_IO_OUT1_DELAY_MAX + 1 && 2856 left_edge[i] != 2857 IO_IO_OUT1_DELAY_MAX + 1) { 2858 right_edge[i] = -1; 2859 } 2860 } 2861 } 2862 break; 2863 } else { 2864 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2865 if (bit_chk & 1) { 2866 /* 2867 * Remember a passing test as 2868 * the right_edge. 2869 */ 2870 right_edge[i] = d; 2871 } else { 2872 if (d != 0) { 2873 /* 2874 * If a right edge has not 2875 * been seen yet, then a future 2876 * passing test will mark this 2877 * edge as the left edge. 2878 */ 2879 if (right_edge[i] == 2880 IO_IO_OUT1_DELAY_MAX + 1) 2881 left_edge[i] = -(d + 1); 2882 } else { 2883 /* 2884 * d = 0 failed, but it passed 2885 * when testing the left edge, 2886 * so it must be marginal, set 2887 * it to -1. 2888 */ 2889 if (right_edge[i] == 2890 IO_IO_OUT1_DELAY_MAX + 1 && 2891 left_edge[i] != 2892 IO_IO_OUT1_DELAY_MAX + 1) 2893 right_edge[i] = -1; 2894 /* 2895 * If a right edge has not been 2896 * seen yet, then a future 2897 * passing test will mark this 2898 * edge as the left edge. 2899 */ 2900 else if (right_edge[i] == 2901 IO_IO_OUT1_DELAY_MAX + 2902 1) 2903 left_edge[i] = -(d + 1); 2904 } 2905 } 2906 debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d); 2907 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d", 2908 (int)(bit_chk & 1), i, left_edge[i]); 2909 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2910 right_edge[i]); 2911 bit_chk = bit_chk >> 1; 2912 } 2913 } 2914 } 2915 2916 /* Check that all bits have a window */ 2917 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2918 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \ 2919 %d right_edge[%u]: %d", __func__, __LINE__, 2920 i, left_edge[i], i, right_edge[i]); 2921 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) || 2922 (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) { 2923 set_failing_group_stage(test_bgn + i, 2924 CAL_STAGE_WRITES, 2925 CAL_SUBSTAGE_WRITES_CENTER); 2926 return 0; 2927 } 2928 } 2929 2930 /* Find middle of window for each DQ bit */ 2931 mid_min = left_edge[0] - right_edge[0]; 2932 min_index = 0; 2933 for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2934 mid = left_edge[i] - right_edge[i]; 2935 if (mid < mid_min) { 2936 mid_min = mid; 2937 min_index = i; 2938 } 2939 } 2940 2941 /* 2942 * -mid_min/2 represents the amount that we need to move DQS. 2943 * If mid_min is odd and positive we'll need to add one to 2944 * make sure the rounding in further calculations is correct 2945 * (always bias to the right), so just add 1 for all positive values. 
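 *
 * Illustrative example: mid_min = 5 becomes (5 + 1) / 2 = 3 rather
 * than 5 / 2 = 2, biasing the resulting DQS shift to the right.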
2946 */ 2947 if (mid_min > 0) 2948 mid_min++; 2949 mid_min = mid_min / 2; 2950 debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__, 2951 __LINE__, mid_min); 2952 2953 /* Determine the amount we can change DQS (which is -mid_min) */ 2954 orig_mid_min = mid_min; 2955 new_dqs = start_dqs; 2956 mid_min = 0; 2957 debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \ 2958 mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min); 2959 /* Initialize data for export structures */ 2960 dqs_margin = IO_IO_OUT1_DELAY_MAX + 1; 2961 dq_margin = IO_IO_OUT1_DELAY_MAX + 1; 2962 2963 /* add delay to bring centre of all DQ windows to the same "level" */ 2964 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { 2965 /* Use values before divide by 2 to reduce round off error */ 2966 shift_dq = (left_edge[i] - right_edge[i] - 2967 (left_edge[min_index] - right_edge[min_index]))/2 + 2968 (orig_mid_min - mid_min); 2969 2970 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \ 2971 [%u]=%d\n", __func__, __LINE__, i, shift_dq); 2972 2973 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2974 temp_dq_out1_delay = readl(addr + (i << 2)); 2975 if (shift_dq + (int32_t)temp_dq_out1_delay > 2976 (int32_t)IO_IO_OUT1_DELAY_MAX) { 2977 shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay; 2978 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) { 2979 shift_dq = -(int32_t)temp_dq_out1_delay; 2980 } 2981 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n", 2982 i, shift_dq); 2983 scc_mgr_set_dq_out1_delay(write_group, i, temp_dq_out1_delay + 2984 shift_dq); 2985 scc_mgr_load_dq(i); 2986 2987 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i, 2988 left_edge[i] - shift_dq + (-mid_min), 2989 right_edge[i] + shift_dq - (-mid_min)); 2990 /* To determine values for export structures */ 2991 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) 2992 dq_margin = left_edge[i] - shift_dq + (-mid_min); 2993 2994 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) 2995 dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2996 } 2997 2998 /* Move DQS */ 2999 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 3000 writel(0, &sdr_scc_mgr->update); 3001 3002 /* Centre DM */ 3003 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__); 3004 3005 /* 3006 * set the left and right edge of each bit to an illegal value, 3007 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value, 3008 */ 3009 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; 3010 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; 3011 int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3012 int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3013 int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1; 3014 int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1; 3015 int32_t win_best = 0; 3016 3017 /* Search for the/part of the window with DM shift */ 3018 for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) { 3019 scc_mgr_apply_group_dm_out1_delay(write_group, d); 3020 writel(0, &sdr_scc_mgr->update); 3021 3022 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, 3023 PASS_ALL_BITS, &bit_chk, 3024 0)) { 3025 /* USE Set current end of the window */ 3026 end_curr = -d; 3027 /* 3028 * If a starting edge of our window has not been seen 3029 * this is our current start of the DM window. 3030 */ 3031 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) 3032 bgn_curr = -d; 3033 3034 /* 3035 * If current window is bigger than best seen. 3036 * Set best seen to be current window. 
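 *
 * The window size is measured as end_curr - bgn_curr + 1 delay taps.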
3037 */ 3038 if ((end_curr-bgn_curr+1) > win_best) { 3039 win_best = end_curr-bgn_curr+1; 3040 bgn_best = bgn_curr; 3041 end_best = end_curr; 3042 } 3043 } else { 3044 /* We just saw a failing test. Reset temp edge */ 3045 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3046 end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3047 } 3048 } 3049 3050 3051 /* Reset DM delay chains to 0 */ 3052 scc_mgr_apply_group_dm_out1_delay(write_group, 0); 3053 3054 /* 3055 * Check to see if the current window nudges up aganist 0 delay. 3056 * If so we need to continue the search by shifting DQS otherwise DQS 3057 * search begins as a new search. */ 3058 if (end_curr != 0) { 3059 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3060 end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3061 } 3062 3063 /* Search for the/part of the window with DQS shifts */ 3064 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) { 3065 /* 3066 * Note: This only shifts DQS, so are we limiting ourselve to 3067 * width of DQ unnecessarily. 3068 */ 3069 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 3070 d + new_dqs); 3071 3072 writel(0, &sdr_scc_mgr->update); 3073 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, 3074 PASS_ALL_BITS, &bit_chk, 3075 0)) { 3076 /* USE Set current end of the window */ 3077 end_curr = d; 3078 /* 3079 * If a beginning edge of our window has not been seen 3080 * this is our current begin of the DM window. 3081 */ 3082 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) 3083 bgn_curr = d; 3084 3085 /* 3086 * If current window is bigger than best seen. Set best 3087 * seen to be current window. 3088 */ 3089 if ((end_curr-bgn_curr+1) > win_best) { 3090 win_best = end_curr-bgn_curr+1; 3091 bgn_best = bgn_curr; 3092 end_best = end_curr; 3093 } 3094 } else { 3095 /* We just saw a failing test. Reset temp edge */ 3096 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3097 end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3098 3099 /* Early exit optimization: if ther remaining delay 3100 chain space is less than already seen largest window 3101 we can exit */ 3102 if ((win_best-1) > 3103 (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) { 3104 break; 3105 } 3106 } 3107 } 3108 3109 /* assign left and right edge for cal and reporting; */ 3110 left_edge[0] = -1*bgn_best; 3111 right_edge[0] = end_best; 3112 3113 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__, 3114 __LINE__, left_edge[0], right_edge[0]); 3115 3116 /* Move DQS (back to orig) */ 3117 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 3118 3119 /* Move DM */ 3120 3121 /* Find middle of window for the DM bit */ 3122 mid = (left_edge[0] - right_edge[0]) / 2; 3123 3124 /* only move right, since we are not moving DQS/DQ */ 3125 if (mid < 0) 3126 mid = 0; 3127 3128 /* dm_marign should fail if we never find a window */ 3129 if (win_best == 0) 3130 dm_margin = -1; 3131 else 3132 dm_margin = left_edge[0] - mid; 3133 3134 scc_mgr_apply_group_dm_out1_delay(write_group, mid); 3135 writel(0, &sdr_scc_mgr->update); 3136 3137 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \ 3138 dm_margin=%d\n", __func__, __LINE__, left_edge[0], 3139 right_edge[0], mid, dm_margin); 3140 /* Export values */ 3141 gbl->fom_out += dq_margin + dqs_margin; 3142 3143 debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \ 3144 dqs_margin=%d dm_margin=%d\n", __func__, __LINE__, 3145 dq_margin, dqs_margin, dm_margin); 3146 3147 /* 3148 * Do not remove this line as it makes sure all of our 3149 * decisions have been applied. 
3150 */ 3151 writel(0, &sdr_scc_mgr->update); 3152 return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0); 3153 } 3154 3155 /* calibrate the write operations */ 3156 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g, 3157 uint32_t test_bgn) 3158 { 3159 /* update info for sims */ 3160 debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn); 3161 3162 reg_file_set_stage(CAL_STAGE_WRITES); 3163 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); 3164 3165 reg_file_set_group(g); 3166 3167 if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) { 3168 set_failing_group_stage(g, CAL_STAGE_WRITES, 3169 CAL_SUBSTAGE_WRITES_CENTER); 3170 return 0; 3171 } 3172 3173 return 1; 3174 } 3175 3176 /* precharge all banks and activate row 0 in bank "000..." and bank "111..." */ 3177 static void mem_precharge_and_activate(void) 3178 { 3179 uint32_t r; 3180 3181 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { 3182 if (param->skip_ranks[r]) { 3183 /* request to skip the rank */ 3184 continue; 3185 } 3186 3187 /* set rank */ 3188 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); 3189 3190 /* precharge all banks ... */ 3191 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3192 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3193 3194 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0); 3195 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1, 3196 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 3197 3198 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1); 3199 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2, 3200 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 3201 3202 /* activate rows */ 3203 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3204 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3205 } 3206 } 3207 3208 /* Configure various memory related parameters. */ 3209 static void mem_config(void) 3210 { 3211 uint32_t rlat, wlat; 3212 uint32_t rw_wl_nop_cycles; 3213 uint32_t max_latency; 3214 3215 debug("%s:%d\n", __func__, __LINE__); 3216 /* read in write and read latency */ 3217 wlat = readl(&data_mgr->t_wl_add); 3218 wlat += readl(&data_mgr->mem_t_add); 3219 3220 /* WL for hard phy does not include additive latency */ 3221 3222 /* 3223 * add addtional write latency to offset the address/command extra 3224 * clock cycle. We change the AC mux setting causing AC to be delayed 3225 * by one mem clock cycle. Only do this for DDR3 3226 */ 3227 wlat = wlat + 1; 3228 3229 rlat = readl(&data_mgr->t_rl_add); 3230 3231 rw_wl_nop_cycles = wlat - 2; 3232 gbl->rw_wl_nop_cycles = rw_wl_nop_cycles; 3233 3234 /* 3235 * For AV/CV, lfifo is hardened and always runs at full rate so 3236 * max latency in AFI clocks, used here, is correspondingly smaller. 
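 *
 * For example, if MAX_LATENCY_COUNT_WIDTH were 5, the clamp below
 * would limit curr_read_lat to (1 << 5) - 1 = 31 AFI clocks.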
3237 */ 3238 max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1; 3239 /* configure for a burst length of 8 */ 3240 3241 /* write latency */ 3242 /* Adjust Write Latency for Hard PHY */ 3243 wlat = wlat + 1; 3244 3245 /* set a pretty high read latency initially */ 3246 gbl->curr_read_lat = rlat + 16; 3247 3248 if (gbl->curr_read_lat > max_latency) 3249 gbl->curr_read_lat = max_latency; 3250 3251 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3252 3253 /* advertise write latency */ 3254 gbl->curr_write_lat = wlat; 3255 writel(wlat - 2, &phy_mgr_cfg->afi_wlat); 3256 3257 /* initialize bit slips */ 3258 mem_precharge_and_activate(); 3259 } 3260 3261 /* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */ 3262 static void mem_skip_calibrate(void) 3263 { 3264 uint32_t vfifo_offset; 3265 uint32_t i, j, r; 3266 3267 debug("%s:%d\n", __func__, __LINE__); 3268 /* Need to update every shadow register set used by the interface */ 3269 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 3270 r += NUM_RANKS_PER_SHADOW_REG) { 3271 /* 3272 * Set output phase alignment settings appropriate for 3273 * skip calibration. 3274 */ 3275 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3276 scc_mgr_set_dqs_en_phase(i, 0); 3277 #if IO_DLL_CHAIN_LENGTH == 6 3278 scc_mgr_set_dqdqs_output_phase(i, 6); 3279 #else 3280 scc_mgr_set_dqdqs_output_phase(i, 7); 3281 #endif 3282 /* 3283 * Case:33398 3284 * 3285 * Write data arrives to the I/O two cycles before write 3286 * latency is reached (720 deg). 3287 * -> due to bit-slip in a/c bus 3288 * -> to allow board skew where dqs is longer than ck 3289 * -> how often can this happen!? 3290 * -> can claim back some ptaps for high freq 3291 * support if we can relax this, but i digress... 3292 * 3293 * The write_clk leads mem_ck by 90 deg 3294 * The minimum ptap of the OPA is 180 deg 3295 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay 3296 * The write_clk is always delayed by 2 ptaps 3297 * 3298 * Hence, to make DQS aligned to CK, we need to delay 3299 * DQS by: 3300 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH)) 3301 * 3302 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH) 3303 * gives us the number of ptaps, which simplies to: 3304 * 3305 * (1.25 * IO_DLL_CHAIN_LENGTH - 2) 3306 */ 3307 scc_mgr_set_dqdqs_output_phase(i, (1.25 * 3308 IO_DLL_CHAIN_LENGTH - 2)); 3309 } 3310 writel(0xff, &sdr_scc_mgr->dqs_ena); 3311 writel(0xff, &sdr_scc_mgr->dqs_io_ena); 3312 3313 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { 3314 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3315 SCC_MGR_GROUP_COUNTER_OFFSET); 3316 } 3317 writel(0xff, &sdr_scc_mgr->dq_ena); 3318 writel(0xff, &sdr_scc_mgr->dm_ena); 3319 writel(0, &sdr_scc_mgr->update); 3320 } 3321 3322 /* Compensate for simulation model behaviour */ 3323 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3324 scc_mgr_set_dqs_bus_in_delay(i, 10); 3325 scc_mgr_load_dqs(i); 3326 } 3327 writel(0, &sdr_scc_mgr->update); 3328 3329 /* 3330 * ArriaV has hard FIFOs that can only be initialized by incrementing 3331 * in sequencer. 3332 */ 3333 vfifo_offset = CALIB_VFIFO_OFFSET; 3334 for (j = 0; j < vfifo_offset; j++) { 3335 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); 3336 } 3337 writel(0, &phy_mgr_cmd->fifo_reset); 3338 3339 /* 3340 * For ACV with hard lfifo, we get the skip-cal setting from 3341 * generation-time constant. 
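 *
 * (That constant is CALIB_LFIFO_OFFSET, written to phy_rlat just
 * below.)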
3342 */ 3343 gbl->curr_read_lat = CALIB_LFIFO_OFFSET; 3344 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3345 } 3346 3347 /* Memory calibration entry point */ 3348 static uint32_t mem_calibrate(void) 3349 { 3350 uint32_t i; 3351 uint32_t rank_bgn, sr; 3352 uint32_t write_group, write_test_bgn; 3353 uint32_t read_group, read_test_bgn; 3354 uint32_t run_groups, current_run; 3355 uint32_t failing_groups = 0; 3356 uint32_t group_failed = 0; 3357 uint32_t sr_failed = 0; 3358 3359 debug("%s:%d\n", __func__, __LINE__); 3360 /* Initialize the data settings */ 3361 3362 gbl->error_substage = CAL_SUBSTAGE_NIL; 3363 gbl->error_stage = CAL_STAGE_NIL; 3364 gbl->error_group = 0xff; 3365 gbl->fom_in = 0; 3366 gbl->fom_out = 0; 3367 3368 mem_config(); 3369 3370 uint32_t bypass_mode = 0x1; 3371 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3372 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3373 SCC_MGR_GROUP_COUNTER_OFFSET); 3374 scc_set_bypass_mode(i, bypass_mode); 3375 } 3376 3377 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { 3378 /* 3379 * Set VFIFO and LFIFO to instant-on settings in skip 3380 * calibration mode. 3381 */ 3382 mem_skip_calibrate(); 3383 } else { 3384 for (i = 0; i < NUM_CALIB_REPEAT; i++) { 3385 /* 3386 * Zero all delay chain/phase settings for all 3387 * groups and all shadow register sets. 3388 */ 3389 scc_mgr_zero_all(); 3390 3391 run_groups = ~param->skip_groups; 3392 3393 for (write_group = 0, write_test_bgn = 0; write_group 3394 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++, 3395 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) { 3396 /* Initialized the group failure */ 3397 group_failed = 0; 3398 3399 current_run = run_groups & ((1 << 3400 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); 3401 run_groups = run_groups >> 3402 RW_MGR_NUM_DQS_PER_WRITE_GROUP; 3403 3404 if (current_run == 0) 3405 continue; 3406 3407 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | 3408 SCC_MGR_GROUP_COUNTER_OFFSET); 3409 scc_mgr_zero_group(write_group, write_test_bgn, 3410 0); 3411 3412 for (read_group = write_group * 3413 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3414 RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3415 read_test_bgn = 0; 3416 read_group < (write_group + 1) * 3417 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3418 RW_MGR_MEM_IF_WRITE_DQS_WIDTH && 3419 group_failed == 0; 3420 read_group++, read_test_bgn += 3421 RW_MGR_MEM_DQ_PER_READ_DQS) { 3422 /* Calibrate the VFIFO */ 3423 if (!((STATIC_CALIB_STEPS) & 3424 CALIB_SKIP_VFIFO)) { 3425 if (!rw_mgr_mem_calibrate_vfifo 3426 (read_group, 3427 read_test_bgn)) { 3428 group_failed = 1; 3429 3430 if (!(gbl-> 3431 phy_debug_mode_flags & 3432 PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3433 return 0; 3434 } 3435 } 3436 } 3437 } 3438 3439 /* Calibrate the output side */ 3440 if (group_failed == 0) { 3441 for (rank_bgn = 0, sr = 0; rank_bgn 3442 < RW_MGR_MEM_NUMBER_OF_RANKS; 3443 rank_bgn += 3444 NUM_RANKS_PER_SHADOW_REG, 3445 ++sr) { 3446 sr_failed = 0; 3447 if (!((STATIC_CALIB_STEPS) & 3448 CALIB_SKIP_WRITES)) { 3449 if ((STATIC_CALIB_STEPS) 3450 & CALIB_SKIP_DELAY_SWEEPS) { 3451 /* not needed in quick mode! */ 3452 } else { 3453 /* 3454 * Determine if this set of 3455 * ranks should be skipped 3456 * entirely. 
3457 */ 3458 if (!param->skip_shadow_regs[sr]) { 3459 if (!rw_mgr_mem_calibrate_writes 3460 (rank_bgn, write_group, 3461 write_test_bgn)) { 3462 sr_failed = 1; 3463 if (!(gbl-> 3464 phy_debug_mode_flags & 3465 PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3466 return 0; 3467 } 3468 } 3469 } 3470 } 3471 } 3472 if (sr_failed != 0) 3473 group_failed = 1; 3474 } 3475 } 3476 3477 if (group_failed == 0) { 3478 for (read_group = write_group * 3479 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3480 RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3481 read_test_bgn = 0; 3482 read_group < (write_group + 1) 3483 * RW_MGR_MEM_IF_READ_DQS_WIDTH 3484 / RW_MGR_MEM_IF_WRITE_DQS_WIDTH && 3485 group_failed == 0; 3486 read_group++, read_test_bgn += 3487 RW_MGR_MEM_DQ_PER_READ_DQS) { 3488 if (!((STATIC_CALIB_STEPS) & 3489 CALIB_SKIP_WRITES)) { 3490 if (!rw_mgr_mem_calibrate_vfifo_end 3491 (read_group, read_test_bgn)) { 3492 group_failed = 1; 3493 3494 if (!(gbl->phy_debug_mode_flags 3495 & PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3496 return 0; 3497 } 3498 } 3499 } 3500 } 3501 } 3502 3503 if (group_failed != 0) 3504 failing_groups++; 3505 } 3506 3507 /* 3508 * USER If there are any failing groups then report 3509 * the failure. 3510 */ 3511 if (failing_groups != 0) 3512 return 0; 3513 3514 /* Calibrate the LFIFO */ 3515 if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) { 3516 /* 3517 * If we're skipping groups as part of debug, 3518 * don't calibrate LFIFO. 3519 */ 3520 if (param->skip_groups == 0) { 3521 if (!rw_mgr_mem_calibrate_lfifo()) 3522 return 0; 3523 } 3524 } 3525 } 3526 } 3527 3528 /* 3529 * Do not remove this line as it makes sure all of our decisions 3530 * have been applied. 3531 */ 3532 writel(0, &sdr_scc_mgr->update); 3533 return 1; 3534 } 3535 3536 static uint32_t run_mem_calibrate(void) 3537 { 3538 uint32_t pass; 3539 uint32_t debug_info; 3540 3541 debug("%s:%d\n", __func__, __LINE__); 3542 3543 /* Reset pass/fail status shown on afi_cal_success/fail */ 3544 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status); 3545 3546 /* stop tracking manger */ 3547 uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg); 3548 3549 writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg); 3550 3551 initialize(); 3552 rw_mgr_mem_initialize(); 3553 3554 pass = mem_calibrate(); 3555 3556 mem_precharge_and_activate(); 3557 writel(0, &phy_mgr_cmd->fifo_reset); 3558 3559 /* 3560 * Handoff: 3561 * Don't return control of the PHY back to AFI when in debug mode. 
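 *
 * When not in debug mode, rw_mgr_mem_handoff() is called and the
 * mux is switched to 0x2 below; in debug mode the sequencer keeps
 * the mux setting programmed by initialize().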
3562 */ 3563 if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) { 3564 rw_mgr_mem_handoff(); 3565 /* 3566 * In Hard PHY this is a 2-bit control: 3567 * 0: AFI Mux Select 3568 * 1: DDIO Mux Select 3569 */ 3570 writel(0x2, &phy_mgr_cfg->mux_sel); 3571 } 3572 3573 writel(ctrlcfg, &sdr_ctrl->ctrl_cfg); 3574 3575 if (pass) { 3576 printf("%s: CALIBRATION PASSED\n", __FILE__); 3577 3578 gbl->fom_in /= 2; 3579 gbl->fom_out /= 2; 3580 3581 if (gbl->fom_in > 0xff) 3582 gbl->fom_in = 0xff; 3583 3584 if (gbl->fom_out > 0xff) 3585 gbl->fom_out = 0xff; 3586 3587 /* Update the FOM in the register file */ 3588 debug_info = gbl->fom_in; 3589 debug_info |= gbl->fom_out << 8; 3590 writel(debug_info, &sdr_reg_file->fom); 3591 3592 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3593 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status); 3594 } else { 3595 printf("%s: CALIBRATION FAILED\n", __FILE__); 3596 3597 debug_info = gbl->error_stage; 3598 debug_info |= gbl->error_substage << 8; 3599 debug_info |= gbl->error_group << 16; 3600 3601 writel(debug_info, &sdr_reg_file->failing_stage); 3602 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3603 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status); 3604 3605 /* Update the failing group/stage in the register file */ 3606 debug_info = gbl->error_stage; 3607 debug_info |= gbl->error_substage << 8; 3608 debug_info |= gbl->error_group << 16; 3609 writel(debug_info, &sdr_reg_file->failing_stage); 3610 } 3611 3612 return pass; 3613 } 3614 3615 /** 3616 * hc_initialize_rom_data() - Initialize ROM data 3617 * 3618 * Initialize ROM data. 3619 */ 3620 static void hc_initialize_rom_data(void) 3621 { 3622 u32 i, addr; 3623 3624 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET; 3625 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++) 3626 writel(inst_rom_init[i], addr + (i << 2)); 3627 3628 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET; 3629 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++) 3630 writel(ac_rom_init[i], addr + (i << 2)); 3631 } 3632 3633 static void initialize_reg_file(void) 3634 { 3635 /* Initialize the register file with the correct data */ 3636 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature); 3637 writel(0, &sdr_reg_file->debug_data_addr); 3638 writel(0, &sdr_reg_file->cur_stage); 3639 writel(0, &sdr_reg_file->fom); 3640 writel(0, &sdr_reg_file->failing_stage); 3641 writel(0, &sdr_reg_file->debug1); 3642 writel(0, &sdr_reg_file->debug2); 3643 } 3644 3645 static void initialize_hps_phy(void) 3646 { 3647 uint32_t reg; 3648 /* 3649 * Tracking also gets configured here because it's in the 3650 * same register. 3651 */ 3652 uint32_t trk_sample_count = 7500; 3653 uint32_t trk_long_idle_sample_count = (10 << 16) | 100; 3654 /* 3655 * Format is number of outer loops in the 16 MSB, sample 3656 * count in 16 LSB. 3657 */ 3658 3659 reg = 0; 3660 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); 3661 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); 3662 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); 3663 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); 3664 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); 3665 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); 3666 /* 3667 * This field selects the intrinsic latency to RDATA_EN/FULL path. 3668 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 
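 *
 * (ADDLATSEL is set to 0 below, i.e. the bypass setting.)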
3669 */ 3670 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); 3671 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET( 3672 trk_sample_count); 3673 writel(reg, &sdr_ctrl->phy_ctrl0); 3674 3675 reg = 0; 3676 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET( 3677 trk_sample_count >> 3678 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); 3679 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET( 3680 trk_long_idle_sample_count); 3681 writel(reg, &sdr_ctrl->phy_ctrl1); 3682 3683 reg = 0; 3684 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET( 3685 trk_long_idle_sample_count >> 3686 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); 3687 writel(reg, &sdr_ctrl->phy_ctrl2); 3688 } 3689 3690 static void initialize_tracking(void) 3691 { 3692 uint32_t concatenated_longidle = 0x0; 3693 uint32_t concatenated_delays = 0x0; 3694 uint32_t concatenated_rw_addr = 0x0; 3695 uint32_t concatenated_refresh = 0x0; 3696 uint32_t trk_sample_count = 7500; 3697 uint32_t dtaps_per_ptap; 3698 uint32_t tmp_delay; 3699 3700 /* 3701 * compute usable version of value in case we skip full 3702 * computation later 3703 */ 3704 dtaps_per_ptap = 0; 3705 tmp_delay = 0; 3706 while (tmp_delay < IO_DELAY_PER_OPA_TAP) { 3707 dtaps_per_ptap++; 3708 tmp_delay += IO_DELAY_PER_DCHAIN_TAP; 3709 } 3710 dtaps_per_ptap--; 3711 3712 concatenated_longidle = concatenated_longidle ^ 10; 3713 /*longidle outer loop */ 3714 concatenated_longidle = concatenated_longidle << 16; 3715 concatenated_longidle = concatenated_longidle ^ 100; 3716 /*longidle sample count */ 3717 concatenated_delays = concatenated_delays ^ 243; 3718 /* trfc, worst case of 933Mhz 4Gb */ 3719 concatenated_delays = concatenated_delays << 8; 3720 concatenated_delays = concatenated_delays ^ 14; 3721 /* trcd, worst case */ 3722 concatenated_delays = concatenated_delays << 8; 3723 concatenated_delays = concatenated_delays ^ 10; 3724 /* vfifo wait */ 3725 concatenated_delays = concatenated_delays << 8; 3726 concatenated_delays = concatenated_delays ^ 4; 3727 /* mux delay */ 3728 3729 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE; 3730 concatenated_rw_addr = concatenated_rw_addr << 8; 3731 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1; 3732 concatenated_rw_addr = concatenated_rw_addr << 8; 3733 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ; 3734 concatenated_rw_addr = concatenated_rw_addr << 8; 3735 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL; 3736 3737 concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL; 3738 concatenated_refresh = concatenated_refresh << 24; 3739 concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */ 3740 3741 /* Initialize the register file with the correct data */ 3742 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap); 3743 writel(trk_sample_count, &sdr_reg_file->trk_sample_count); 3744 writel(concatenated_longidle, &sdr_reg_file->trk_longidle); 3745 writel(concatenated_delays, &sdr_reg_file->delays); 3746 writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr); 3747 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width); 3748 writel(concatenated_refresh, &sdr_reg_file->trk_rfsh); 3749 } 3750 3751 int sdram_calibration_full(void) 3752 { 3753 struct param_type my_param; 3754 struct gbl_type my_gbl; 3755 uint32_t pass; 3756 uint32_t i; 3757 3758 param = &my_param; 3759 gbl = &my_gbl; 3760 3761 /* Initialize the debug mode flags */ 3762 gbl->phy_debug_mode_flags = 0; 3763 /* Set 
the calibration enabled by default */ 3764 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; 3765 /* 3766 * Only sweep all groups (regardless of fail state) by default 3767 * Set enabled read test by default. 3768 */ 3769 #if DISABLE_GUARANTEED_READ 3770 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; 3771 #endif 3772 /* Initialize the register file */ 3773 initialize_reg_file(); 3774 3775 /* Initialize any PHY CSR */ 3776 initialize_hps_phy(); 3777 3778 scc_mgr_initialize(); 3779 3780 initialize_tracking(); 3781 3782 /* USER Enable all ranks, groups */ 3783 for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++) 3784 param->skip_ranks[i] = 0; 3785 for (i = 0; i < NUM_SHADOW_REGS; ++i) 3786 param->skip_shadow_regs[i] = 0; 3787 param->skip_groups = 0; 3788 3789 printf("%s: Preparing to start memory calibration\n", __FILE__); 3790 3791 debug("%s:%d\n", __func__, __LINE__); 3792 debug_cond(DLEVEL == 1, 3793 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", 3794 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM, 3795 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS, 3796 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS, 3797 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 3798 debug_cond(DLEVEL == 1, 3799 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", 3800 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3801 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH, 3802 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP); 3803 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u", 3804 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH); 3805 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", 3806 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX, 3807 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX); 3808 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", 3809 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX, 3810 IO_IO_OUT2_DELAY_MAX); 3811 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", 3812 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE); 3813 3814 hc_initialize_rom_data(); 3815 3816 /* update info for sims */ 3817 reg_file_set_stage(CAL_STAGE_NIL); 3818 reg_file_set_group(0); 3819 3820 /* 3821 * Load global needed for those actions that require 3822 * some dynamic calibration support. 3823 */ 3824 dyn_calib_steps = STATIC_CALIB_STEPS; 3825 /* 3826 * Load global to allow dynamic selection of delay loop settings 3827 * based on calibration mode. 3828 */ 3829 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) 3830 skip_delay_mask = 0xff; 3831 else 3832 skip_delay_mask = 0x0; 3833 3834 pass = run_mem_calibrate(); 3835 3836 printf("%s: Calibration complete\n", __FILE__); 3837 return pass; 3838 } 3839