1 /* 2 * Copyright Altera Corporation (C) 2012-2015 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <common.h> 8 #include <asm/io.h> 9 #include <asm/arch/sdram.h> 10 #include "sequencer.h" 11 #include "sequencer_auto.h" 12 #include "sequencer_auto_ac_init.h" 13 #include "sequencer_auto_inst_init.h" 14 #include "sequencer_defines.h" 15 16 static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs = 17 (struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800); 18 19 static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs = 20 (struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00); 21 22 static struct socfpga_sdr_reg_file *sdr_reg_file = 23 (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS; 24 25 static struct socfpga_sdr_scc_mgr *sdr_scc_mgr = 26 (struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00); 27 28 static struct socfpga_phy_mgr_cmd *phy_mgr_cmd = 29 (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS; 30 31 static struct socfpga_phy_mgr_cfg *phy_mgr_cfg = 32 (struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40); 33 34 static struct socfpga_data_mgr *data_mgr = 35 (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS; 36 37 static struct socfpga_sdr_ctrl *sdr_ctrl = 38 (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS; 39 40 #define DELTA_D 1 41 42 /* 43 * In order to reduce ROM size, most of the selectable calibration steps are 44 * decided at compile time based on the user's calibration mode selection, 45 * as captured by the STATIC_CALIB_STEPS selection below. 46 * 47 * However, to support simulation-time selection of fast simulation mode, where 48 * we skip everything except the bare minimum, we need a few of the steps to 49 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the 50 * check, which is based on the rtl-supplied value, or we dynamically compute 51 * the value to use based on the dynamically-chosen calibration mode 52 */ 53 54 #define DLEVEL 0 55 #define STATIC_IN_RTL_SIM 0 56 #define STATIC_SKIP_DELAY_LOOPS 0 57 58 #define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \ 59 STATIC_SKIP_DELAY_LOOPS) 60 61 /* calibration steps requested by the rtl */ 62 uint16_t dyn_calib_steps; 63 64 /* 65 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option 66 * instead of static, we use boolean logic to select between 67 * non-skip and skip values 68 * 69 * The mask is set to include all bits when not-skipping, but is 70 * zero when skipping 71 */ 72 73 uint16_t skip_delay_mask; /* mask off bits when skipping/not-skipping */ 74 75 #define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \ 76 ((non_skip_value) & skip_delay_mask) 77 78 struct gbl_type *gbl; 79 struct param_type *param; 80 uint32_t curr_shadow_reg; 81 82 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, 83 uint32_t write_group, uint32_t use_dm, 84 uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks); 85 86 static void set_failing_group_stage(uint32_t group, uint32_t stage, 87 uint32_t substage) 88 { 89 /* 90 * Only set the global stage if there was not been any other 91 * failing group 92 */ 93 if (gbl->error_stage == CAL_STAGE_NIL) { 94 gbl->error_substage = substage; 95 gbl->error_stage = stage; 96 gbl->error_group = group; 97 } 98 } 99 100 static void reg_file_set_group(u16 set_group) 101 { 102 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16); 103 } 104 105 static void reg_file_set_stage(u8 set_stage) 106 { 107 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff); 108 } 109 110 static void reg_file_set_sub_stage(u8 set_sub_stage) 111 { 112 set_sub_stage &= 0xff; 113 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8); 114 } 115 116 static void initialize(void) 117 { 118 debug("%s:%d\n", __func__, __LINE__); 119 /* USER calibration has control over path to memory */ 120 /* 121 * In Hard PHY this is a 2-bit control: 122 * 0: AFI Mux Select 123 * 1: DDIO Mux Select 124 */ 125 writel(0x3, &phy_mgr_cfg->mux_sel); 126 127 /* USER memory clock is not stable we begin initialization */ 128 writel(0, &phy_mgr_cfg->reset_mem_stbl); 129 130 /* USER calibration status all set to zero */ 131 writel(0, &phy_mgr_cfg->cal_status); 132 133 writel(0, &phy_mgr_cfg->cal_debug_info); 134 135 if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) { 136 param->read_correct_mask_vg = ((uint32_t)1 << 137 (RW_MGR_MEM_DQ_PER_READ_DQS / 138 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1; 139 param->write_correct_mask_vg = ((uint32_t)1 << 140 (RW_MGR_MEM_DQ_PER_READ_DQS / 141 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1; 142 param->read_correct_mask = ((uint32_t)1 << 143 RW_MGR_MEM_DQ_PER_READ_DQS) - 1; 144 param->write_correct_mask = ((uint32_t)1 << 145 RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1; 146 param->dm_correct_mask = ((uint32_t)1 << 147 (RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH)) 148 - 1; 149 } 150 } 151 152 static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode) 153 { 154 uint32_t odt_mask_0 = 0; 155 uint32_t odt_mask_1 = 0; 156 uint32_t cs_and_odt_mask; 157 158 if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) { 159 if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) { 160 /* 161 * 1 Rank 162 * Read: ODT = 0 163 * Write: ODT = 1 164 */ 165 odt_mask_0 = 0x0; 166 odt_mask_1 = 0x1; 167 } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) { 168 /* 2 Ranks */ 169 if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) { 170 /* - Dual-Slot , Single-Rank 171 * (1 chip-select per DIMM) 172 * OR 173 * - RDIMM, 4 total CS (2 CS per DIMM) 174 * means 2 DIMM 175 * Since MEM_NUMBER_OF_RANKS is 2 they are 176 * both single rank 177 * with 2 CS each (special for RDIMM) 178 * Read: Turn on ODT on the opposite rank 179 * Write: Turn on ODT on all ranks 180 */ 181 odt_mask_0 = 0x3 & ~(1 << rank); 182 odt_mask_1 = 0x3; 183 } else { 184 /* 185 * USER - Single-Slot , Dual-rank DIMMs 186 * (2 chip-selects per DIMM) 187 * USER Read: Turn on ODT off on all ranks 188 * USER Write: Turn on ODT on active rank 189 */ 190 odt_mask_0 = 0x0; 191 odt_mask_1 = 0x3 & (1 << rank); 192 } 193 } else { 194 /* 4 Ranks 195 * Read: 196 * ----------+-----------------------+ 197 * | | 198 * | ODT | 199 * Read From +-----------------------+ 200 * Rank | 3 | 2 | 1 | 0 | 201 * ----------+-----+-----+-----+-----+ 202 * 0 | 0 | 1 | 0 | 0 | 203 * 1 | 1 | 0 | 0 | 0 | 204 * 2 | 0 | 0 | 0 | 1 | 205 * 3 | 0 | 0 | 1 | 0 | 206 * ----------+-----+-----+-----+-----+ 207 * 208 * Write: 209 * ----------+-----------------------+ 210 * | | 211 * | ODT | 212 * Write To +-----------------------+ 213 * Rank | 3 | 2 | 1 | 0 | 214 * ----------+-----+-----+-----+-----+ 215 * 0 | 0 | 1 | 0 | 1 | 216 * 1 | 1 | 0 | 1 | 0 | 217 * 2 | 0 | 1 | 0 | 1 | 218 * 3 | 1 | 0 | 1 | 0 | 219 * ----------+-----+-----+-----+-----+ 220 */ 221 switch (rank) { 222 case 0: 223 odt_mask_0 = 0x4; 224 odt_mask_1 = 0x5; 225 break; 226 case 1: 227 odt_mask_0 = 0x8; 228 odt_mask_1 = 0xA; 229 break; 230 case 2: 231 odt_mask_0 = 0x1; 232 odt_mask_1 = 0x5; 233 break; 234 case 3: 235 odt_mask_0 = 0x2; 236 odt_mask_1 = 0xA; 237 break; 238 } 239 } 240 } else { 241 odt_mask_0 = 0x0; 242 odt_mask_1 = 0x0; 243 } 244 245 cs_and_odt_mask = 246 (0xFF & ~(1 << rank)) | 247 ((0xFF & odt_mask_0) << 8) | 248 ((0xFF & odt_mask_1) << 16); 249 writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS | 250 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET); 251 } 252 253 /** 254 * scc_mgr_set() - Set SCC Manager register 255 * @off: Base offset in SCC Manager space 256 * @grp: Read/Write group 257 * @val: Value to be set 258 * 259 * This function sets the SCC Manager (Scan Chain Control Manager) register. 260 */ 261 static void scc_mgr_set(u32 off, u32 grp, u32 val) 262 { 263 writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2)); 264 } 265 266 /** 267 * scc_mgr_initialize() - Initialize SCC Manager registers 268 * 269 * Initialize SCC Manager registers. 270 */ 271 static void scc_mgr_initialize(void) 272 { 273 /* 274 * Clear register file for HPS. 16 (2^4) is the size of the 275 * full register file in the scc mgr: 276 * RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS + 277 * MEM_IF_READ_DQS_WIDTH - 1); 278 */ 279 int i; 280 281 for (i = 0; i < 16; i++) { 282 debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n", 283 __func__, __LINE__, i); 284 scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i); 285 } 286 } 287 288 static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase) 289 { 290 scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase); 291 } 292 293 static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay) 294 { 295 scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay); 296 } 297 298 static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase) 299 { 300 scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase); 301 } 302 303 static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay) 304 { 305 scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay); 306 } 307 308 static void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, uint32_t delay) 309 { 310 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS, 311 delay); 312 } 313 314 static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay) 315 { 316 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay); 317 } 318 319 static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay) 320 { 321 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay); 322 } 323 324 static void scc_mgr_set_dqs_out1_delay(uint32_t write_group, 325 uint32_t delay) 326 { 327 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS, 328 delay); 329 } 330 331 static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay) 332 { 333 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, 334 RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm, 335 delay); 336 } 337 338 /* load up dqs config settings */ 339 static void scc_mgr_load_dqs(uint32_t dqs) 340 { 341 writel(dqs, &sdr_scc_mgr->dqs_ena); 342 } 343 344 /* load up dqs io config settings */ 345 static void scc_mgr_load_dqs_io(void) 346 { 347 writel(0, &sdr_scc_mgr->dqs_io_ena); 348 } 349 350 /* load up dq config settings */ 351 static void scc_mgr_load_dq(uint32_t dq_in_group) 352 { 353 writel(dq_in_group, &sdr_scc_mgr->dq_ena); 354 } 355 356 /* load up dm config settings */ 357 static void scc_mgr_load_dm(uint32_t dm) 358 { 359 writel(dm, &sdr_scc_mgr->dm_ena); 360 } 361 362 /** 363 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks 364 * @off: Base offset in SCC Manager space 365 * @grp: Read/Write group 366 * @val: Value to be set 367 * @update: If non-zero, trigger SCC Manager update for all ranks 368 * 369 * This function sets the SCC Manager (Scan Chain Control Manager) register 370 * and optionally triggers the SCC update for all ranks. 371 */ 372 static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val, 373 const int update) 374 { 375 u32 r; 376 377 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 378 r += NUM_RANKS_PER_SHADOW_REG) { 379 scc_mgr_set(off, grp, val); 380 381 if (update || (r == 0)) { 382 writel(grp, &sdr_scc_mgr->dqs_ena); 383 writel(0, &sdr_scc_mgr->update); 384 } 385 } 386 } 387 388 static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase) 389 { 390 /* 391 * USER although the h/w doesn't support different phases per 392 * shadow register, for simplicity our scc manager modeling 393 * keeps different phase settings per shadow reg, and it's 394 * important for us to keep them in sync to match h/w. 395 * for efficiency, the scan chain update should occur only 396 * once to sr0. 397 */ 398 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET, 399 read_group, phase, 0); 400 } 401 402 static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group, 403 uint32_t phase) 404 { 405 /* 406 * USER although the h/w doesn't support different phases per 407 * shadow register, for simplicity our scc manager modeling 408 * keeps different phase settings per shadow reg, and it's 409 * important for us to keep them in sync to match h/w. 410 * for efficiency, the scan chain update should occur only 411 * once to sr0. 412 */ 413 scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, 414 write_group, phase, 0); 415 } 416 417 static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group, 418 uint32_t delay) 419 { 420 /* 421 * In shadow register mode, the T11 settings are stored in 422 * registers in the core, which are updated by the DQS_ENA 423 * signals. Not issuing the SCC_MGR_UPD command allows us to 424 * save lots of rank switching overhead, by calling 425 * select_shadow_regs_for_update with update_scan_chains 426 * set to 0. 427 */ 428 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET, 429 read_group, delay, 1); 430 writel(0, &sdr_scc_mgr->update); 431 } 432 433 /** 434 * scc_mgr_set_oct_out1_delay() - Set OCT output delay 435 * @write_group: Write group 436 * @delay: Delay value 437 * 438 * This function sets the OCT output delay in SCC manager. 439 */ 440 static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay) 441 { 442 const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH / 443 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; 444 const int base = write_group * ratio; 445 int i; 446 /* 447 * Load the setting in the SCC manager 448 * Although OCT affects only write data, the OCT delay is controlled 449 * by the DQS logic block which is instantiated once per read group. 450 * For protocols where a write group consists of multiple read groups, 451 * the setting must be set multiple times. 452 */ 453 for (i = 0; i < ratio; i++) 454 scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay); 455 } 456 457 static void scc_mgr_set_hhp_extras(void) 458 { 459 /* 460 * Load the fixed setting in the SCC manager 461 * bits: 0:0 = 1'b1 - dqs bypass 462 * bits: 1:1 = 1'b1 - dq bypass 463 * bits: 4:2 = 3'b001 - rfifo_mode 464 * bits: 6:5 = 2'b01 - rfifo clock_select 465 * bits: 7:7 = 1'b0 - separate gating from ungating setting 466 * bits: 8:8 = 1'b0 - separate OE from Output delay setting 467 */ 468 uint32_t value = (0<<8) | (0<<7) | (1<<5) | (1<<2) | (1<<1) | (1<<0); 469 uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_GLOBALS_OFFSET; 470 471 writel(value, addr + SCC_MGR_HHP_EXTRAS_OFFSET); 472 } 473 474 /* 475 * USER Zero all DQS config 476 * TODO: maybe rename to scc_mgr_zero_dqs_config (or something) 477 */ 478 static void scc_mgr_zero_all(void) 479 { 480 uint32_t i, r; 481 482 /* 483 * USER Zero all DQS config settings, across all groups and all 484 * shadow registers 485 */ 486 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += 487 NUM_RANKS_PER_SHADOW_REG) { 488 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 489 /* 490 * The phases actually don't exist on a per-rank basis, 491 * but there's no harm updating them several times, so 492 * let's keep the code simple. 493 */ 494 scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE); 495 scc_mgr_set_dqs_en_phase(i, 0); 496 scc_mgr_set_dqs_en_delay(i, 0); 497 } 498 499 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { 500 scc_mgr_set_dqdqs_output_phase(i, 0); 501 /* av/cv don't have out2 */ 502 scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE); 503 } 504 } 505 506 /* multicast to all DQS group enables */ 507 writel(0xff, &sdr_scc_mgr->dqs_ena); 508 writel(0, &sdr_scc_mgr->update); 509 } 510 511 /** 512 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update 513 * @write_group: Write group 514 * 515 * Set bypass mode and trigger SCC update. 516 */ 517 static void scc_set_bypass_mode(const u32 write_group) 518 { 519 /* Only needed once to set all groups, pins, DQ, DQS, DM. */ 520 if (write_group == 0) { 521 debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__, 522 __LINE__); 523 scc_mgr_set_hhp_extras(); 524 debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n", 525 __func__, __LINE__); 526 } 527 528 /* Multicast to all DQ enables. */ 529 writel(0xff, &sdr_scc_mgr->dq_ena); 530 writel(0xff, &sdr_scc_mgr->dm_ena); 531 532 /* Update current DQS IO enable. */ 533 writel(0, &sdr_scc_mgr->dqs_io_ena); 534 535 /* Update the DQS logic. */ 536 writel(write_group, &sdr_scc_mgr->dqs_ena); 537 538 /* Hit update. */ 539 writel(0, &sdr_scc_mgr->update); 540 } 541 542 /** 543 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group 544 * @write_group: Write group 545 * 546 * Load DQS settings for Write Group, do not trigger SCC update. 547 */ 548 static void scc_mgr_load_dqs_for_write_group(const u32 write_group) 549 { 550 const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH / 551 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; 552 const int base = write_group * ratio; 553 int i; 554 /* 555 * Load the setting in the SCC manager 556 * Although OCT affects only write data, the OCT delay is controlled 557 * by the DQS logic block which is instantiated once per read group. 558 * For protocols where a write group consists of multiple read groups, 559 * the setting must be set multiple times. 560 */ 561 for (i = 0; i < ratio; i++) 562 writel(base + i, &sdr_scc_mgr->dqs_ena); 563 } 564 565 static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin, 566 int32_t out_only) 567 { 568 uint32_t i, r; 569 570 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += 571 NUM_RANKS_PER_SHADOW_REG) { 572 /* Zero all DQ config settings */ 573 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 574 scc_mgr_set_dq_out1_delay(i, 0); 575 if (!out_only) 576 scc_mgr_set_dq_in_delay(i, 0); 577 } 578 579 /* multicast to all DQ enables */ 580 writel(0xff, &sdr_scc_mgr->dq_ena); 581 582 /* Zero all DM config settings */ 583 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 584 scc_mgr_set_dm_out1_delay(i, 0); 585 } 586 587 /* multicast to all DM enables */ 588 writel(0xff, &sdr_scc_mgr->dm_ena); 589 590 /* zero all DQS io settings */ 591 if (!out_only) 592 scc_mgr_set_dqs_io_in_delay(write_group, 0); 593 /* av/cv don't have out2 */ 594 scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE); 595 scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE); 596 scc_mgr_load_dqs_for_write_group(write_group); 597 598 /* multicast to all DQS IO enables (only 1) */ 599 writel(0, &sdr_scc_mgr->dqs_io_ena); 600 601 /* hit update to zero everything */ 602 writel(0, &sdr_scc_mgr->update); 603 } 604 } 605 606 /* 607 * apply and load a particular input delay for the DQ pins in a group 608 * group_bgn is the index of the first dq pin (in the write group) 609 */ 610 static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group, 611 uint32_t group_bgn, uint32_t delay) 612 { 613 uint32_t i, p; 614 615 for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { 616 scc_mgr_set_dq_in_delay(p, delay); 617 scc_mgr_load_dq(p); 618 } 619 } 620 621 /** 622 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group 623 * @delay: Delay value 624 * 625 * Apply and load a particular output delay for the DQ pins in a group. 626 */ 627 static void scc_mgr_apply_group_dq_out1_delay(const u32 delay) 628 { 629 int i; 630 631 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 632 scc_mgr_set_dq_out1_delay(i, delay); 633 scc_mgr_load_dq(i); 634 } 635 } 636 637 /* apply and load a particular output delay for the DM pins in a group */ 638 static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group, 639 uint32_t delay1) 640 { 641 uint32_t i; 642 643 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 644 scc_mgr_set_dm_out1_delay(i, delay1); 645 scc_mgr_load_dm(i); 646 } 647 } 648 649 650 /* apply and load delay on both DQS and OCT out1 */ 651 static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group, 652 uint32_t delay) 653 { 654 scc_mgr_set_dqs_out1_delay(write_group, delay); 655 scc_mgr_load_dqs_io(); 656 657 scc_mgr_set_oct_out1_delay(write_group, delay); 658 scc_mgr_load_dqs_for_write_group(write_group); 659 } 660 661 /* apply a delay to the entire output side: DQ, DM, DQS, OCT */ 662 static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group, 663 uint32_t group_bgn, 664 uint32_t delay) 665 { 666 uint32_t i, p, new_delay; 667 668 /* dq shift */ 669 for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { 670 new_delay = READ_SCC_DQ_OUT2_DELAY; 671 new_delay += delay; 672 673 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 674 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQ[%u,%u]:\ 675 %u > %lu => %lu", __func__, __LINE__, 676 write_group, group_bgn, delay, i, p, new_delay, 677 (long unsigned int)IO_IO_OUT2_DELAY_MAX, 678 (long unsigned int)IO_IO_OUT2_DELAY_MAX); 679 new_delay = IO_IO_OUT2_DELAY_MAX; 680 } 681 682 scc_mgr_load_dq(i); 683 } 684 685 /* dm shift */ 686 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { 687 new_delay = READ_SCC_DM_IO_OUT2_DELAY; 688 new_delay += delay; 689 690 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 691 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DM[%u]:\ 692 %u > %lu => %lu\n", __func__, __LINE__, 693 write_group, group_bgn, delay, i, new_delay, 694 (long unsigned int)IO_IO_OUT2_DELAY_MAX, 695 (long unsigned int)IO_IO_OUT2_DELAY_MAX); 696 new_delay = IO_IO_OUT2_DELAY_MAX; 697 } 698 699 scc_mgr_load_dm(i); 700 } 701 702 /* dqs shift */ 703 new_delay = READ_SCC_DQS_IO_OUT2_DELAY; 704 new_delay += delay; 705 706 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 707 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;" 708 " adding %u to OUT1\n", __func__, __LINE__, 709 write_group, group_bgn, delay, new_delay, 710 IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, 711 new_delay - IO_IO_OUT2_DELAY_MAX); 712 scc_mgr_set_dqs_out1_delay(write_group, new_delay - 713 IO_IO_OUT2_DELAY_MAX); 714 new_delay = IO_IO_OUT2_DELAY_MAX; 715 } 716 717 scc_mgr_load_dqs_io(); 718 719 /* oct shift */ 720 new_delay = READ_SCC_OCT_OUT2_DELAY; 721 new_delay += delay; 722 723 if (new_delay > IO_IO_OUT2_DELAY_MAX) { 724 debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;" 725 " adding %u to OUT1\n", __func__, __LINE__, 726 write_group, group_bgn, delay, new_delay, 727 IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, 728 new_delay - IO_IO_OUT2_DELAY_MAX); 729 scc_mgr_set_oct_out1_delay(write_group, new_delay - 730 IO_IO_OUT2_DELAY_MAX); 731 new_delay = IO_IO_OUT2_DELAY_MAX; 732 } 733 734 scc_mgr_load_dqs_for_write_group(write_group); 735 } 736 737 /* 738 * USER apply a delay to the entire output side (DQ, DM, DQS, OCT) 739 * and to all ranks 740 */ 741 static void scc_mgr_apply_group_all_out_delay_add_all_ranks( 742 uint32_t write_group, uint32_t group_bgn, uint32_t delay) 743 { 744 uint32_t r; 745 746 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 747 r += NUM_RANKS_PER_SHADOW_REG) { 748 scc_mgr_apply_group_all_out_delay_add(write_group, 749 group_bgn, delay); 750 writel(0, &sdr_scc_mgr->update); 751 } 752 } 753 754 /* optimization used to recover some slots in ddr3 inst_rom */ 755 /* could be applied to other protocols if we wanted to */ 756 static void set_jump_as_return(void) 757 { 758 /* 759 * to save space, we replace return with jump to special shared 760 * RETURN instruction so we set the counter to large value so that 761 * we always jump 762 */ 763 writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0); 764 writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 765 } 766 767 /* 768 * should always use constants as argument to ensure all computations are 769 * performed at compile time 770 */ 771 static void delay_for_n_mem_clocks(const uint32_t clocks) 772 { 773 uint32_t afi_clocks; 774 uint8_t inner = 0; 775 uint8_t outer = 0; 776 uint16_t c_loop = 0; 777 778 debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks); 779 780 781 afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO; 782 /* scale (rounding up) to get afi clocks */ 783 784 /* 785 * Note, we don't bother accounting for being off a little bit 786 * because of a few extra instructions in outer loops 787 * Note, the loops have a test at the end, and do the test before 788 * the decrement, and so always perform the loop 789 * 1 time more than the counter value 790 */ 791 if (afi_clocks == 0) { 792 ; 793 } else if (afi_clocks <= 0x100) { 794 inner = afi_clocks-1; 795 outer = 0; 796 c_loop = 0; 797 } else if (afi_clocks <= 0x10000) { 798 inner = 0xff; 799 outer = (afi_clocks-1) >> 8; 800 c_loop = 0; 801 } else { 802 inner = 0xff; 803 outer = 0xff; 804 c_loop = (afi_clocks-1) >> 16; 805 } 806 807 /* 808 * rom instructions are structured as follows: 809 * 810 * IDLE_LOOP2: jnz cntr0, TARGET_A 811 * IDLE_LOOP1: jnz cntr1, TARGET_B 812 * return 813 * 814 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and 815 * TARGET_B is set to IDLE_LOOP2 as well 816 * 817 * if we have no outer loop, though, then we can use IDLE_LOOP1 only, 818 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely 819 * 820 * a little confusing, but it helps save precious space in the inst_rom 821 * and sequencer rom and keeps the delays more accurate and reduces 822 * overhead 823 */ 824 if (afi_clocks <= 0x100) { 825 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), 826 &sdr_rw_load_mgr_regs->load_cntr1); 827 828 writel(RW_MGR_IDLE_LOOP1, 829 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 830 831 writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 832 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 833 } else { 834 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), 835 &sdr_rw_load_mgr_regs->load_cntr0); 836 837 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), 838 &sdr_rw_load_mgr_regs->load_cntr1); 839 840 writel(RW_MGR_IDLE_LOOP2, 841 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 842 843 writel(RW_MGR_IDLE_LOOP2, 844 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 845 846 /* hack to get around compiler not being smart enough */ 847 if (afi_clocks <= 0x10000) { 848 /* only need to run once */ 849 writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS | 850 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 851 } else { 852 do { 853 writel(RW_MGR_IDLE_LOOP2, 854 SDR_PHYGRP_RWMGRGRP_ADDRESS | 855 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 856 } while (c_loop-- != 0); 857 } 858 } 859 debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks); 860 } 861 862 static void rw_mgr_mem_initialize(void) 863 { 864 uint32_t r; 865 uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | 866 RW_MGR_RUN_SINGLE_GROUP_OFFSET; 867 868 debug("%s:%d\n", __func__, __LINE__); 869 870 /* The reset / cke part of initialization is broadcasted to all ranks */ 871 writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS | 872 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET); 873 874 /* 875 * Here's how you load register for a loop 876 * Counters are located @ 0x800 877 * Jump address are located @ 0xC00 878 * For both, registers 0 to 3 are selected using bits 3 and 2, like 879 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C 880 * I know this ain't pretty, but Avalon bus throws away the 2 least 881 * significant bits 882 */ 883 884 /* start with memory RESET activated */ 885 886 /* tINIT = 200us */ 887 888 /* 889 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles 890 * If a and b are the number of iteration in 2 nested loops 891 * it takes the following number of cycles to complete the operation: 892 * number_of_cycles = ((2 + n) * a + 2) * b 893 * where n is the number of instruction in the inner loop 894 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF, 895 * b = 6A 896 */ 897 898 /* Load counters */ 899 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL), 900 &sdr_rw_load_mgr_regs->load_cntr0); 901 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL), 902 &sdr_rw_load_mgr_regs->load_cntr1); 903 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL), 904 &sdr_rw_load_mgr_regs->load_cntr2); 905 906 /* Load jump address */ 907 writel(RW_MGR_INIT_RESET_0_CKE_0, 908 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 909 writel(RW_MGR_INIT_RESET_0_CKE_0, 910 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 911 writel(RW_MGR_INIT_RESET_0_CKE_0, 912 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 913 914 /* Execute count instruction */ 915 writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr); 916 917 /* indicate that memory is stable */ 918 writel(1, &phy_mgr_cfg->reset_mem_stbl); 919 920 /* 921 * transition the RESET to high 922 * Wait for 500us 923 */ 924 925 /* 926 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles 927 * If a and b are the number of iteration in 2 nested loops 928 * it takes the following number of cycles to complete the operation 929 * number_of_cycles = ((2 + n) * a + 2) * b 930 * where n is the number of instruction in the inner loop 931 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83, 932 * b = FF 933 */ 934 935 /* Load counters */ 936 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL), 937 &sdr_rw_load_mgr_regs->load_cntr0); 938 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL), 939 &sdr_rw_load_mgr_regs->load_cntr1); 940 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL), 941 &sdr_rw_load_mgr_regs->load_cntr2); 942 943 /* Load jump address */ 944 writel(RW_MGR_INIT_RESET_1_CKE_0, 945 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 946 writel(RW_MGR_INIT_RESET_1_CKE_0, 947 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 948 writel(RW_MGR_INIT_RESET_1_CKE_0, 949 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 950 951 writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr); 952 953 /* bring up clock enable */ 954 955 /* tXRP < 250 ck cycles */ 956 delay_for_n_mem_clocks(250); 957 958 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { 959 if (param->skip_ranks[r]) { 960 /* request to skip the rank */ 961 continue; 962 } 963 964 /* set rank */ 965 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); 966 967 /* 968 * USER Use Mirror-ed commands for odd ranks if address 969 * mirrorring is on 970 */ 971 if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) { 972 set_jump_as_return(); 973 writel(RW_MGR_MRS2_MIRR, grpaddr); 974 delay_for_n_mem_clocks(4); 975 set_jump_as_return(); 976 writel(RW_MGR_MRS3_MIRR, grpaddr); 977 delay_for_n_mem_clocks(4); 978 set_jump_as_return(); 979 writel(RW_MGR_MRS1_MIRR, grpaddr); 980 delay_for_n_mem_clocks(4); 981 set_jump_as_return(); 982 writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr); 983 } else { 984 set_jump_as_return(); 985 writel(RW_MGR_MRS2, grpaddr); 986 delay_for_n_mem_clocks(4); 987 set_jump_as_return(); 988 writel(RW_MGR_MRS3, grpaddr); 989 delay_for_n_mem_clocks(4); 990 set_jump_as_return(); 991 writel(RW_MGR_MRS1, grpaddr); 992 set_jump_as_return(); 993 writel(RW_MGR_MRS0_DLL_RESET, grpaddr); 994 } 995 set_jump_as_return(); 996 writel(RW_MGR_ZQCL, grpaddr); 997 998 /* tZQinit = tDLLK = 512 ck cycles */ 999 delay_for_n_mem_clocks(512); 1000 } 1001 } 1002 1003 /* 1004 * At the end of calibration we have to program the user settings in, and 1005 * USER hand off the memory to the user. 1006 */ 1007 static void rw_mgr_mem_handoff(void) 1008 { 1009 uint32_t r; 1010 uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | 1011 RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1012 1013 debug("%s:%d\n", __func__, __LINE__); 1014 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { 1015 if (param->skip_ranks[r]) 1016 /* request to skip the rank */ 1017 continue; 1018 /* set rank */ 1019 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); 1020 1021 /* precharge all banks ... */ 1022 writel(RW_MGR_PRECHARGE_ALL, grpaddr); 1023 1024 /* load up MR settings specified by user */ 1025 1026 /* 1027 * Use Mirror-ed commands for odd ranks if address 1028 * mirrorring is on 1029 */ 1030 if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) { 1031 set_jump_as_return(); 1032 writel(RW_MGR_MRS2_MIRR, grpaddr); 1033 delay_for_n_mem_clocks(4); 1034 set_jump_as_return(); 1035 writel(RW_MGR_MRS3_MIRR, grpaddr); 1036 delay_for_n_mem_clocks(4); 1037 set_jump_as_return(); 1038 writel(RW_MGR_MRS1_MIRR, grpaddr); 1039 delay_for_n_mem_clocks(4); 1040 set_jump_as_return(); 1041 writel(RW_MGR_MRS0_USER_MIRR, grpaddr); 1042 } else { 1043 set_jump_as_return(); 1044 writel(RW_MGR_MRS2, grpaddr); 1045 delay_for_n_mem_clocks(4); 1046 set_jump_as_return(); 1047 writel(RW_MGR_MRS3, grpaddr); 1048 delay_for_n_mem_clocks(4); 1049 set_jump_as_return(); 1050 writel(RW_MGR_MRS1, grpaddr); 1051 delay_for_n_mem_clocks(4); 1052 set_jump_as_return(); 1053 writel(RW_MGR_MRS0_USER, grpaddr); 1054 } 1055 /* 1056 * USER need to wait tMOD (12CK or 15ns) time before issuing 1057 * other commands, but we will have plenty of NIOS cycles before 1058 * actual handoff so its okay. 1059 */ 1060 } 1061 } 1062 1063 /* 1064 * performs a guaranteed read on the patterns we are going to use during a 1065 * read test to ensure memory works 1066 */ 1067 static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn, 1068 uint32_t group, uint32_t num_tries, uint32_t *bit_chk, 1069 uint32_t all_ranks) 1070 { 1071 uint32_t r, vg; 1072 uint32_t correct_mask_vg; 1073 uint32_t tmp_bit_chk; 1074 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : 1075 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1076 uint32_t addr; 1077 uint32_t base_rw_mgr; 1078 1079 *bit_chk = param->read_correct_mask; 1080 correct_mask_vg = param->read_correct_mask_vg; 1081 1082 for (r = rank_bgn; r < rank_end; r++) { 1083 if (param->skip_ranks[r]) 1084 /* request to skip the rank */ 1085 continue; 1086 1087 /* set rank */ 1088 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1089 1090 /* Load up a constant bursts of read commands */ 1091 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); 1092 writel(RW_MGR_GUARANTEED_READ, 1093 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1094 1095 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); 1096 writel(RW_MGR_GUARANTEED_READ_CONT, 1097 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1098 1099 tmp_bit_chk = 0; 1100 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) { 1101 /* reset the fifos to get pointers to known state */ 1102 1103 writel(0, &phy_mgr_cmd->fifo_reset); 1104 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1105 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1106 1107 tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS 1108 / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); 1109 1110 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1111 writel(RW_MGR_GUARANTEED_READ, addr + 1112 ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + 1113 vg) << 2)); 1114 1115 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1116 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr)); 1117 1118 if (vg == 0) 1119 break; 1120 } 1121 *bit_chk &= tmp_bit_chk; 1122 } 1123 1124 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1125 writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2)); 1126 1127 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1128 debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\ 1129 %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask, 1130 (long unsigned int)(*bit_chk == param->read_correct_mask)); 1131 return *bit_chk == param->read_correct_mask; 1132 } 1133 1134 static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks 1135 (uint32_t group, uint32_t num_tries, uint32_t *bit_chk) 1136 { 1137 return rw_mgr_mem_calibrate_read_test_patterns(0, group, 1138 num_tries, bit_chk, 1); 1139 } 1140 1141 /* load up the patterns we are going to use during a read test */ 1142 static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn, 1143 uint32_t all_ranks) 1144 { 1145 uint32_t r; 1146 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : 1147 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1148 1149 debug("%s:%d\n", __func__, __LINE__); 1150 for (r = rank_bgn; r < rank_end; r++) { 1151 if (param->skip_ranks[r]) 1152 /* request to skip the rank */ 1153 continue; 1154 1155 /* set rank */ 1156 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1157 1158 /* Load up a constant bursts */ 1159 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); 1160 1161 writel(RW_MGR_GUARANTEED_WRITE_WAIT0, 1162 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1163 1164 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); 1165 1166 writel(RW_MGR_GUARANTEED_WRITE_WAIT1, 1167 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1168 1169 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2); 1170 1171 writel(RW_MGR_GUARANTEED_WRITE_WAIT2, 1172 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1173 1174 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3); 1175 1176 writel(RW_MGR_GUARANTEED_WRITE_WAIT3, 1177 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1178 1179 writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1180 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 1181 } 1182 1183 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1184 } 1185 1186 /* 1187 * try a read and see if it returns correct data back. has dummy reads 1188 * inserted into the mix used to align dqs enable. has more thorough checks 1189 * than the regular read test. 1190 */ 1191 static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group, 1192 uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk, 1193 uint32_t all_groups, uint32_t all_ranks) 1194 { 1195 uint32_t r, vg; 1196 uint32_t correct_mask_vg; 1197 uint32_t tmp_bit_chk; 1198 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : 1199 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 1200 uint32_t addr; 1201 uint32_t base_rw_mgr; 1202 1203 *bit_chk = param->read_correct_mask; 1204 correct_mask_vg = param->read_correct_mask_vg; 1205 1206 uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) & 1207 CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION); 1208 1209 for (r = rank_bgn; r < rank_end; r++) { 1210 if (param->skip_ranks[r]) 1211 /* request to skip the rank */ 1212 continue; 1213 1214 /* set rank */ 1215 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 1216 1217 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1); 1218 1219 writel(RW_MGR_READ_B2B_WAIT1, 1220 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 1221 1222 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2); 1223 writel(RW_MGR_READ_B2B_WAIT2, 1224 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 1225 1226 if (quick_read_mode) 1227 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0); 1228 /* need at least two (1+1) reads to capture failures */ 1229 else if (all_groups) 1230 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0); 1231 else 1232 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0); 1233 1234 writel(RW_MGR_READ_B2B, 1235 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 1236 if (all_groups) 1237 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH * 1238 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1, 1239 &sdr_rw_load_mgr_regs->load_cntr3); 1240 else 1241 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3); 1242 1243 writel(RW_MGR_READ_B2B, 1244 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 1245 1246 tmp_bit_chk = 0; 1247 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) { 1248 /* reset the fifos to get pointers to known state */ 1249 writel(0, &phy_mgr_cmd->fifo_reset); 1250 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 1251 RW_MGR_RESET_READ_DATAPATH_OFFSET); 1252 1253 tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS 1254 / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); 1255 1256 if (all_groups) 1257 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_ALL_GROUPS_OFFSET; 1258 else 1259 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1260 1261 writel(RW_MGR_READ_B2B, addr + 1262 ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + 1263 vg) << 2)); 1264 1265 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); 1266 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr)); 1267 1268 if (vg == 0) 1269 break; 1270 } 1271 *bit_chk &= tmp_bit_chk; 1272 } 1273 1274 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 1275 writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2)); 1276 1277 if (all_correct) { 1278 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1279 debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\ 1280 (%u == %u) => %lu", __func__, __LINE__, group, 1281 all_groups, *bit_chk, param->read_correct_mask, 1282 (long unsigned int)(*bit_chk == 1283 param->read_correct_mask)); 1284 return *bit_chk == param->read_correct_mask; 1285 } else { 1286 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 1287 debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\ 1288 (%u != %lu) => %lu\n", __func__, __LINE__, 1289 group, all_groups, *bit_chk, (long unsigned int)0, 1290 (long unsigned int)(*bit_chk != 0x00)); 1291 return *bit_chk != 0x00; 1292 } 1293 } 1294 1295 static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group, 1296 uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk, 1297 uint32_t all_groups) 1298 { 1299 return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct, 1300 bit_chk, all_groups, 1); 1301 } 1302 1303 static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v) 1304 { 1305 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy); 1306 (*v)++; 1307 } 1308 1309 static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v) 1310 { 1311 uint32_t i; 1312 1313 for (i = 0; i < VFIFO_SIZE-1; i++) 1314 rw_mgr_incr_vfifo(grp, v); 1315 } 1316 1317 static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk) 1318 { 1319 uint32_t v; 1320 uint32_t fail_cnt = 0; 1321 uint32_t test_status; 1322 1323 for (v = 0; v < VFIFO_SIZE; ) { 1324 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n", 1325 __func__, __LINE__, v); 1326 test_status = rw_mgr_mem_calibrate_read_test_all_ranks 1327 (grp, 1, PASS_ONE_BIT, bit_chk, 0); 1328 if (!test_status) { 1329 fail_cnt++; 1330 1331 if (fail_cnt == 2) 1332 break; 1333 } 1334 1335 /* fiddle with FIFO */ 1336 rw_mgr_incr_vfifo(grp, &v); 1337 } 1338 1339 if (v >= VFIFO_SIZE) { 1340 /* no failing read found!! Something must have gone wrong */ 1341 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n", 1342 __func__, __LINE__); 1343 return 0; 1344 } else { 1345 return v; 1346 } 1347 } 1348 1349 static int find_working_phase(uint32_t *grp, uint32_t *bit_chk, 1350 uint32_t dtaps_per_ptap, uint32_t *work_bgn, 1351 uint32_t *v, uint32_t *d, uint32_t *p, 1352 uint32_t *i, uint32_t *max_working_cnt) 1353 { 1354 uint32_t found_begin = 0; 1355 uint32_t tmp_delay = 0; 1356 uint32_t test_status; 1357 1358 for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay += 1359 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { 1360 *work_bgn = tmp_delay; 1361 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d); 1362 1363 for (*i = 0; *i < VFIFO_SIZE; (*i)++) { 1364 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn += 1365 IO_DELAY_PER_OPA_TAP) { 1366 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p); 1367 1368 test_status = 1369 rw_mgr_mem_calibrate_read_test_all_ranks 1370 (*grp, 1, PASS_ONE_BIT, bit_chk, 0); 1371 1372 if (test_status) { 1373 *max_working_cnt = 1; 1374 found_begin = 1; 1375 break; 1376 } 1377 } 1378 1379 if (found_begin) 1380 break; 1381 1382 if (*p > IO_DQS_EN_PHASE_MAX) 1383 /* fiddle with FIFO */ 1384 rw_mgr_incr_vfifo(*grp, v); 1385 } 1386 1387 if (found_begin) 1388 break; 1389 } 1390 1391 if (*i >= VFIFO_SIZE) { 1392 /* cannot find working solution */ 1393 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\ 1394 ptap/dtap\n", __func__, __LINE__); 1395 return 0; 1396 } else { 1397 return 1; 1398 } 1399 } 1400 1401 static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk, 1402 uint32_t *work_bgn, uint32_t *v, uint32_t *d, 1403 uint32_t *p, uint32_t *max_working_cnt) 1404 { 1405 uint32_t found_begin = 0; 1406 uint32_t tmp_delay; 1407 1408 /* Special case code for backing up a phase */ 1409 if (*p == 0) { 1410 *p = IO_DQS_EN_PHASE_MAX; 1411 rw_mgr_decr_vfifo(*grp, v); 1412 } else { 1413 (*p)--; 1414 } 1415 tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP; 1416 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p); 1417 1418 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; 1419 (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { 1420 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d); 1421 1422 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1, 1423 PASS_ONE_BIT, 1424 bit_chk, 0)) { 1425 found_begin = 1; 1426 *work_bgn = tmp_delay; 1427 break; 1428 } 1429 } 1430 1431 /* We have found a working dtap before the ptap found above */ 1432 if (found_begin == 1) 1433 (*max_working_cnt)++; 1434 1435 /* 1436 * Restore VFIFO to old state before we decremented it 1437 * (if needed). 1438 */ 1439 (*p)++; 1440 if (*p > IO_DQS_EN_PHASE_MAX) { 1441 *p = 0; 1442 rw_mgr_incr_vfifo(*grp, v); 1443 } 1444 1445 scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0); 1446 } 1447 1448 static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk, 1449 uint32_t *work_bgn, uint32_t *v, uint32_t *d, 1450 uint32_t *p, uint32_t *i, uint32_t *max_working_cnt, 1451 uint32_t *work_end) 1452 { 1453 uint32_t found_end = 0; 1454 1455 (*p)++; 1456 *work_end += IO_DELAY_PER_OPA_TAP; 1457 if (*p > IO_DQS_EN_PHASE_MAX) { 1458 /* fiddle with FIFO */ 1459 *p = 0; 1460 rw_mgr_incr_vfifo(*grp, v); 1461 } 1462 1463 for (; *i < VFIFO_SIZE + 1; (*i)++) { 1464 for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end 1465 += IO_DELAY_PER_OPA_TAP) { 1466 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p); 1467 1468 if (!rw_mgr_mem_calibrate_read_test_all_ranks 1469 (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) { 1470 found_end = 1; 1471 break; 1472 } else { 1473 (*max_working_cnt)++; 1474 } 1475 } 1476 1477 if (found_end) 1478 break; 1479 1480 if (*p > IO_DQS_EN_PHASE_MAX) { 1481 /* fiddle with FIFO */ 1482 rw_mgr_incr_vfifo(*grp, v); 1483 *p = 0; 1484 } 1485 } 1486 1487 if (*i >= VFIFO_SIZE + 1) { 1488 /* cannot see edge of failing read */ 1489 debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\ 1490 failed\n", __func__, __LINE__); 1491 return 0; 1492 } else { 1493 return 1; 1494 } 1495 } 1496 1497 static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk, 1498 uint32_t *work_bgn, uint32_t *v, uint32_t *d, 1499 uint32_t *p, uint32_t *work_mid, 1500 uint32_t *work_end) 1501 { 1502 int i; 1503 int tmp_delay = 0; 1504 1505 *work_mid = (*work_bgn + *work_end) / 2; 1506 1507 debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n", 1508 *work_bgn, *work_end, *work_mid); 1509 /* Get the middle delay to be less than a VFIFO delay */ 1510 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; 1511 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP) 1512 ; 1513 debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay); 1514 while (*work_mid > tmp_delay) 1515 *work_mid -= tmp_delay; 1516 debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid); 1517 1518 tmp_delay = 0; 1519 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid; 1520 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP) 1521 ; 1522 tmp_delay -= IO_DELAY_PER_OPA_TAP; 1523 debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay); 1524 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++, 1525 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) 1526 ; 1527 debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay); 1528 1529 scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1); 1530 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d); 1531 1532 /* 1533 * push vfifo until we can successfully calibrate. We can do this 1534 * because the largest possible margin in 1 VFIFO cycle. 1535 */ 1536 for (i = 0; i < VFIFO_SIZE; i++) { 1537 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n", 1538 *v); 1539 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1, 1540 PASS_ONE_BIT, 1541 bit_chk, 0)) { 1542 break; 1543 } 1544 1545 /* fiddle with FIFO */ 1546 rw_mgr_incr_vfifo(*grp, v); 1547 } 1548 1549 if (i >= VFIFO_SIZE) { 1550 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \ 1551 failed\n", __func__, __LINE__); 1552 return 0; 1553 } else { 1554 return 1; 1555 } 1556 } 1557 1558 /* find a good dqs enable to use */ 1559 static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) 1560 { 1561 uint32_t v, d, p, i; 1562 uint32_t max_working_cnt; 1563 uint32_t bit_chk; 1564 uint32_t dtaps_per_ptap; 1565 uint32_t work_bgn, work_mid, work_end; 1566 uint32_t found_passing_read, found_failing_read, initial_failing_dtap; 1567 1568 debug("%s:%d %u\n", __func__, __LINE__, grp); 1569 1570 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 1571 1572 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); 1573 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); 1574 1575 /* ************************************************************** */ 1576 /* * Step 0 : Determine number of delay taps for each phase tap * */ 1577 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 1578 1579 /* ********************************************************* */ 1580 /* * Step 1 : First push vfifo until we get a failing read * */ 1581 v = find_vfifo_read(grp, &bit_chk); 1582 1583 max_working_cnt = 0; 1584 1585 /* ******************************************************** */ 1586 /* * step 2: find first working phase, increment in ptaps * */ 1587 work_bgn = 0; 1588 if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d, 1589 &p, &i, &max_working_cnt) == 0) 1590 return 0; 1591 1592 work_end = work_bgn; 1593 1594 /* 1595 * If d is 0 then the working window covers a phase tap and 1596 * we can follow the old procedure otherwise, we've found the beginning, 1597 * and we need to increment the dtaps until we find the end. 1598 */ 1599 if (d == 0) { 1600 /* ********************************************************* */ 1601 /* * step 3a: if we have room, back off by one and 1602 increment in dtaps * */ 1603 1604 sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p, 1605 &max_working_cnt); 1606 1607 /* ********************************************************* */ 1608 /* * step 4a: go forward from working phase to non working 1609 phase, increment in ptaps * */ 1610 if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p, 1611 &i, &max_working_cnt, &work_end) == 0) 1612 return 0; 1613 1614 /* ********************************************************* */ 1615 /* * step 5a: back off one from last, increment in dtaps * */ 1616 1617 /* Special case code for backing up a phase */ 1618 if (p == 0) { 1619 p = IO_DQS_EN_PHASE_MAX; 1620 rw_mgr_decr_vfifo(grp, &v); 1621 } else { 1622 p = p - 1; 1623 } 1624 1625 work_end -= IO_DELAY_PER_OPA_TAP; 1626 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1627 1628 /* * The actual increment of dtaps is done outside of 1629 the if/else loop to share code */ 1630 d = 0; 1631 1632 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \ 1633 vfifo=%u ptap=%u\n", __func__, __LINE__, 1634 v, p); 1635 } else { 1636 /* ******************************************************* */ 1637 /* * step 3-5b: Find the right edge of the window using 1638 delay taps * */ 1639 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \ 1640 ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__, 1641 v, p, d, work_bgn); 1642 1643 work_end = work_bgn; 1644 1645 /* * The actual increment of dtaps is done outside of the 1646 if/else loop to share code */ 1647 1648 /* Only here to counterbalance a subtract later on which is 1649 not needed if this branch of the algorithm is taken */ 1650 max_working_cnt++; 1651 } 1652 1653 /* The dtap increment to find the failing edge is done here */ 1654 for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end += 1655 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { 1656 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \ 1657 end-2: dtap=%u\n", __func__, __LINE__, d); 1658 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1659 1660 if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1661 PASS_ONE_BIT, 1662 &bit_chk, 0)) { 1663 break; 1664 } 1665 } 1666 1667 /* Go back to working dtap */ 1668 if (d != 0) 1669 work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 1670 1671 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \ 1672 ptap=%u dtap=%u end=%u\n", __func__, __LINE__, 1673 v, p, d-1, work_end); 1674 1675 if (work_end < work_bgn) { 1676 /* nil range */ 1677 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \ 1678 failed\n", __func__, __LINE__); 1679 return 0; 1680 } 1681 1682 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n", 1683 __func__, __LINE__, work_bgn, work_end); 1684 1685 /* *************************************************************** */ 1686 /* 1687 * * We need to calculate the number of dtaps that equal a ptap 1688 * * To do that we'll back up a ptap and re-find the edge of the 1689 * * window using dtaps 1690 */ 1691 1692 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \ 1693 for tracking\n", __func__, __LINE__); 1694 1695 /* Special case code for backing up a phase */ 1696 if (p == 0) { 1697 p = IO_DQS_EN_PHASE_MAX; 1698 rw_mgr_decr_vfifo(grp, &v); 1699 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \ 1700 cycle/phase: v=%u p=%u\n", __func__, __LINE__, 1701 v, p); 1702 } else { 1703 p = p - 1; 1704 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \ 1705 phase only: v=%u p=%u", __func__, __LINE__, 1706 v, p); 1707 } 1708 1709 scc_mgr_set_dqs_en_phase_all_ranks(grp, p); 1710 1711 /* 1712 * Increase dtap until we first see a passing read (in case the 1713 * window is smaller than a ptap), 1714 * and then a failing read to mark the edge of the window again 1715 */ 1716 1717 /* Find a passing read */ 1718 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n", 1719 __func__, __LINE__); 1720 found_passing_read = 0; 1721 found_failing_read = 0; 1722 initial_failing_dtap = d; 1723 for (; d <= IO_DQS_EN_DELAY_MAX; d++) { 1724 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \ 1725 read d=%u\n", __func__, __LINE__, d); 1726 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1727 1728 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, 1729 PASS_ONE_BIT, 1730 &bit_chk, 0)) { 1731 found_passing_read = 1; 1732 break; 1733 } 1734 } 1735 1736 if (found_passing_read) { 1737 /* Find a failing read */ 1738 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \ 1739 read\n", __func__, __LINE__); 1740 for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) { 1741 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \ 1742 testing read d=%u\n", __func__, __LINE__, d); 1743 scc_mgr_set_dqs_en_delay_all_ranks(grp, d); 1744 1745 if (!rw_mgr_mem_calibrate_read_test_all_ranks 1746 (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { 1747 found_failing_read = 1; 1748 break; 1749 } 1750 } 1751 } else { 1752 debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \ 1753 calculate dtaps", __func__, __LINE__); 1754 debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n"); 1755 } 1756 1757 /* 1758 * The dynamically calculated dtaps_per_ptap is only valid if we 1759 * found a passing/failing read. If we didn't, it means d hit the max 1760 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its 1761 * statically calculated value. 1762 */ 1763 if (found_passing_read && found_failing_read) 1764 dtaps_per_ptap = d - initial_failing_dtap; 1765 1766 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap); 1767 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \ 1768 - %u = %u", __func__, __LINE__, d, 1769 initial_failing_dtap, dtaps_per_ptap); 1770 1771 /* ******************************************** */ 1772 /* * step 6: Find the centre of the window * */ 1773 if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p, 1774 &work_mid, &work_end) == 0) 1775 return 0; 1776 1777 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \ 1778 vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__, 1779 v, p-1, d); 1780 return 1; 1781 } 1782 1783 /* 1784 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different 1785 * dq_in_delay values 1786 */ 1787 static uint32_t 1788 rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay 1789 (uint32_t write_group, uint32_t read_group, uint32_t test_bgn) 1790 { 1791 uint32_t found; 1792 uint32_t i; 1793 uint32_t p; 1794 uint32_t d; 1795 uint32_t r; 1796 1797 const uint32_t delay_step = IO_IO_IN_DELAY_MAX / 1798 (RW_MGR_MEM_DQ_PER_READ_DQS-1); 1799 /* we start at zero, so have one less dq to devide among */ 1800 1801 debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group, 1802 test_bgn); 1803 1804 /* try different dq_in_delays since the dq path is shorter than dqs */ 1805 1806 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 1807 r += NUM_RANKS_PER_SHADOW_REG) { 1808 for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; 1809 i++, p++, d += delay_step) { 1810 debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\ 1811 vfifo_find_dqs_", __func__, __LINE__); 1812 debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ", 1813 write_group, read_group); 1814 debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i , p, d); 1815 scc_mgr_set_dq_in_delay(p, d); 1816 scc_mgr_load_dq(p); 1817 } 1818 writel(0, &sdr_scc_mgr->update); 1819 } 1820 1821 found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group); 1822 1823 debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\ 1824 en_phase_sweep_dq", __func__, __LINE__); 1825 debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Reseting delay \ 1826 chain to zero\n", write_group, read_group, found); 1827 1828 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 1829 r += NUM_RANKS_PER_SHADOW_REG) { 1830 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; 1831 i++, p++) { 1832 scc_mgr_set_dq_in_delay(p, 0); 1833 scc_mgr_load_dq(p); 1834 } 1835 writel(0, &sdr_scc_mgr->update); 1836 } 1837 1838 return found; 1839 } 1840 1841 /* per-bit deskew DQ and center */ 1842 static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn, 1843 uint32_t write_group, uint32_t read_group, uint32_t test_bgn, 1844 uint32_t use_read_test, uint32_t update_fom) 1845 { 1846 uint32_t i, p, d, min_index; 1847 /* 1848 * Store these as signed since there are comparisons with 1849 * signed numbers. 1850 */ 1851 uint32_t bit_chk; 1852 uint32_t sticky_bit_chk; 1853 int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; 1854 int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; 1855 int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS]; 1856 int32_t mid; 1857 int32_t orig_mid_min, mid_min; 1858 int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs, 1859 final_dqs_en; 1860 int32_t dq_margin, dqs_margin; 1861 uint32_t stop; 1862 uint32_t temp_dq_in_delay1, temp_dq_in_delay2; 1863 uint32_t addr; 1864 1865 debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn); 1866 1867 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET; 1868 start_dqs = readl(addr + (read_group << 2)); 1869 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) 1870 start_dqs_en = readl(addr + ((read_group << 2) 1871 - IO_DQS_EN_DELAY_OFFSET)); 1872 1873 /* set the left and right edge of each bit to an illegal value */ 1874 /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */ 1875 sticky_bit_chk = 0; 1876 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 1877 left_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1878 right_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1879 } 1880 1881 /* Search for the left edge of the window for each bit */ 1882 for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) { 1883 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d); 1884 1885 writel(0, &sdr_scc_mgr->update); 1886 1887 /* 1888 * Stop searching when the read test doesn't pass AND when 1889 * we've seen a passing read on every bit. 1890 */ 1891 if (use_read_test) { 1892 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn, 1893 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT, 1894 &bit_chk, 0, 0); 1895 } else { 1896 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1897 0, PASS_ONE_BIT, 1898 &bit_chk, 0); 1899 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * 1900 (read_group - (write_group * 1901 RW_MGR_MEM_IF_READ_DQS_WIDTH / 1902 RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); 1903 stop = (bit_chk == 0); 1904 } 1905 sticky_bit_chk = sticky_bit_chk | bit_chk; 1906 stop = stop && (sticky_bit_chk == param->read_correct_mask); 1907 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \ 1908 && %u", __func__, __LINE__, d, 1909 sticky_bit_chk, 1910 param->read_correct_mask, stop); 1911 1912 if (stop == 1) { 1913 break; 1914 } else { 1915 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 1916 if (bit_chk & 1) { 1917 /* Remember a passing test as the 1918 left_edge */ 1919 left_edge[i] = d; 1920 } else { 1921 /* If a left edge has not been seen yet, 1922 then a future passing test will mark 1923 this edge as the right edge */ 1924 if (left_edge[i] == 1925 IO_IO_IN_DELAY_MAX + 1) { 1926 right_edge[i] = -(d + 1); 1927 } 1928 } 1929 bit_chk = bit_chk >> 1; 1930 } 1931 } 1932 } 1933 1934 /* Reset DQ delay chains to 0 */ 1935 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0); 1936 sticky_bit_chk = 0; 1937 for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) { 1938 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \ 1939 %d right_edge[%u]: %d\n", __func__, __LINE__, 1940 i, left_edge[i], i, right_edge[i]); 1941 1942 /* 1943 * Check for cases where we haven't found the left edge, 1944 * which makes our assignment of the the right edge invalid. 1945 * Reset it to the illegal value. 1946 */ 1947 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && ( 1948 right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { 1949 right_edge[i] = IO_IO_IN_DELAY_MAX + 1; 1950 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \ 1951 right_edge[%u]: %d\n", __func__, __LINE__, 1952 i, right_edge[i]); 1953 } 1954 1955 /* 1956 * Reset sticky bit (except for bits where we have seen 1957 * both the left and right edge). 1958 */ 1959 sticky_bit_chk = sticky_bit_chk << 1; 1960 if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) && 1961 (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { 1962 sticky_bit_chk = sticky_bit_chk | 1; 1963 } 1964 1965 if (i == 0) 1966 break; 1967 } 1968 1969 /* Search for the right edge of the window for each bit */ 1970 for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) { 1971 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); 1972 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 1973 uint32_t delay = d + start_dqs_en; 1974 if (delay > IO_DQS_EN_DELAY_MAX) 1975 delay = IO_DQS_EN_DELAY_MAX; 1976 scc_mgr_set_dqs_en_delay(read_group, delay); 1977 } 1978 scc_mgr_load_dqs(read_group); 1979 1980 writel(0, &sdr_scc_mgr->update); 1981 1982 /* 1983 * Stop searching when the read test doesn't pass AND when 1984 * we've seen a passing read on every bit. 1985 */ 1986 if (use_read_test) { 1987 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn, 1988 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT, 1989 &bit_chk, 0, 0); 1990 } else { 1991 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1992 0, PASS_ONE_BIT, 1993 &bit_chk, 0); 1994 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * 1995 (read_group - (write_group * 1996 RW_MGR_MEM_IF_READ_DQS_WIDTH / 1997 RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); 1998 stop = (bit_chk == 0); 1999 } 2000 sticky_bit_chk = sticky_bit_chk | bit_chk; 2001 stop = stop && (sticky_bit_chk == param->read_correct_mask); 2002 2003 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \ 2004 %u && %u", __func__, __LINE__, d, 2005 sticky_bit_chk, param->read_correct_mask, stop); 2006 2007 if (stop == 1) { 2008 break; 2009 } else { 2010 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2011 if (bit_chk & 1) { 2012 /* Remember a passing test as 2013 the right_edge */ 2014 right_edge[i] = d; 2015 } else { 2016 if (d != 0) { 2017 /* If a right edge has not been 2018 seen yet, then a future passing 2019 test will mark this edge as the 2020 left edge */ 2021 if (right_edge[i] == 2022 IO_IO_IN_DELAY_MAX + 1) { 2023 left_edge[i] = -(d + 1); 2024 } 2025 } else { 2026 /* d = 0 failed, but it passed 2027 when testing the left edge, 2028 so it must be marginal, 2029 set it to -1 */ 2030 if (right_edge[i] == 2031 IO_IO_IN_DELAY_MAX + 1 && 2032 left_edge[i] != 2033 IO_IO_IN_DELAY_MAX 2034 + 1) { 2035 right_edge[i] = -1; 2036 } 2037 /* If a right edge has not been 2038 seen yet, then a future passing 2039 test will mark this edge as the 2040 left edge */ 2041 else if (right_edge[i] == 2042 IO_IO_IN_DELAY_MAX + 2043 1) { 2044 left_edge[i] = -(d + 1); 2045 } 2046 } 2047 } 2048 2049 debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\ 2050 d=%u]: ", __func__, __LINE__, d); 2051 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ", 2052 (int)(bit_chk & 1), i, left_edge[i]); 2053 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2054 right_edge[i]); 2055 bit_chk = bit_chk >> 1; 2056 } 2057 } 2058 } 2059 2060 /* Check that all bits have a window */ 2061 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2062 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \ 2063 %d right_edge[%u]: %d", __func__, __LINE__, 2064 i, left_edge[i], i, right_edge[i]); 2065 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i] 2066 == IO_IO_IN_DELAY_MAX + 1)) { 2067 /* 2068 * Restore delay chain settings before letting the loop 2069 * in rw_mgr_mem_calibrate_vfifo to retry different 2070 * dqs/ck relationships. 2071 */ 2072 scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs); 2073 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2074 scc_mgr_set_dqs_en_delay(read_group, 2075 start_dqs_en); 2076 } 2077 scc_mgr_load_dqs(read_group); 2078 writel(0, &sdr_scc_mgr->update); 2079 2080 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \ 2081 find edge [%u]: %d %d", __func__, __LINE__, 2082 i, left_edge[i], right_edge[i]); 2083 if (use_read_test) { 2084 set_failing_group_stage(read_group * 2085 RW_MGR_MEM_DQ_PER_READ_DQS + i, 2086 CAL_STAGE_VFIFO, 2087 CAL_SUBSTAGE_VFIFO_CENTER); 2088 } else { 2089 set_failing_group_stage(read_group * 2090 RW_MGR_MEM_DQ_PER_READ_DQS + i, 2091 CAL_STAGE_VFIFO_AFTER_WRITES, 2092 CAL_SUBSTAGE_VFIFO_CENTER); 2093 } 2094 return 0; 2095 } 2096 } 2097 2098 /* Find middle of window for each DQ bit */ 2099 mid_min = left_edge[0] - right_edge[0]; 2100 min_index = 0; 2101 for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { 2102 mid = left_edge[i] - right_edge[i]; 2103 if (mid < mid_min) { 2104 mid_min = mid; 2105 min_index = i; 2106 } 2107 } 2108 2109 /* 2110 * -mid_min/2 represents the amount that we need to move DQS. 2111 * If mid_min is odd and positive we'll need to add one to 2112 * make sure the rounding in further calculations is correct 2113 * (always bias to the right), so just add 1 for all positive values. 2114 */ 2115 if (mid_min > 0) 2116 mid_min++; 2117 2118 mid_min = mid_min / 2; 2119 2120 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n", 2121 __func__, __LINE__, mid_min, min_index); 2122 2123 /* Determine the amount we can change DQS (which is -mid_min) */ 2124 orig_mid_min = mid_min; 2125 new_dqs = start_dqs - mid_min; 2126 if (new_dqs > IO_DQS_IN_DELAY_MAX) 2127 new_dqs = IO_DQS_IN_DELAY_MAX; 2128 else if (new_dqs < 0) 2129 new_dqs = 0; 2130 2131 mid_min = start_dqs - new_dqs; 2132 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n", 2133 mid_min, new_dqs); 2134 2135 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2136 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX) 2137 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX; 2138 else if (start_dqs_en - mid_min < 0) 2139 mid_min += start_dqs_en - mid_min; 2140 } 2141 new_dqs = start_dqs - mid_min; 2142 2143 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \ 2144 new_dqs=%d mid_min=%d\n", start_dqs, 2145 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1, 2146 new_dqs, mid_min); 2147 2148 /* Initialize data for export structures */ 2149 dqs_margin = IO_IO_IN_DELAY_MAX + 1; 2150 dq_margin = IO_IO_IN_DELAY_MAX + 1; 2151 2152 /* add delay to bring centre of all DQ windows to the same "level" */ 2153 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { 2154 /* Use values before divide by 2 to reduce round off error */ 2155 shift_dq = (left_edge[i] - right_edge[i] - 2156 (left_edge[min_index] - right_edge[min_index]))/2 + 2157 (orig_mid_min - mid_min); 2158 2159 debug_cond(DLEVEL == 2, "vfifo_center: before: \ 2160 shift_dq[%u]=%d\n", i, shift_dq); 2161 2162 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET; 2163 temp_dq_in_delay1 = readl(addr + (p << 2)); 2164 temp_dq_in_delay2 = readl(addr + (i << 2)); 2165 2166 if (shift_dq + (int32_t)temp_dq_in_delay1 > 2167 (int32_t)IO_IO_IN_DELAY_MAX) { 2168 shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2; 2169 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) { 2170 shift_dq = -(int32_t)temp_dq_in_delay1; 2171 } 2172 debug_cond(DLEVEL == 2, "vfifo_center: after: \ 2173 shift_dq[%u]=%d\n", i, shift_dq); 2174 final_dq[i] = temp_dq_in_delay1 + shift_dq; 2175 scc_mgr_set_dq_in_delay(p, final_dq[i]); 2176 scc_mgr_load_dq(p); 2177 2178 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i, 2179 left_edge[i] - shift_dq + (-mid_min), 2180 right_edge[i] + shift_dq - (-mid_min)); 2181 /* To determine values for export structures */ 2182 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) 2183 dq_margin = left_edge[i] - shift_dq + (-mid_min); 2184 2185 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) 2186 dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2187 } 2188 2189 final_dqs = new_dqs; 2190 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) 2191 final_dqs_en = start_dqs_en - mid_min; 2192 2193 /* Move DQS-en */ 2194 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 2195 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en); 2196 scc_mgr_load_dqs(read_group); 2197 } 2198 2199 /* Move DQS */ 2200 scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs); 2201 scc_mgr_load_dqs(read_group); 2202 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \ 2203 dqs_margin=%d", __func__, __LINE__, 2204 dq_margin, dqs_margin); 2205 2206 /* 2207 * Do not remove this line as it makes sure all of our decisions 2208 * have been applied. Apply the update bit. 2209 */ 2210 writel(0, &sdr_scc_mgr->update); 2211 2212 return (dq_margin >= 0) && (dqs_margin >= 0); 2213 } 2214 2215 /* 2216 * calibrate the read valid prediction FIFO. 2217 * 2218 * - read valid prediction will consist of finding a good DQS enable phase, 2219 * DQS enable delay, DQS input phase, and DQS input delay. 2220 * - we also do a per-bit deskew on the DQ lines. 2221 */ 2222 static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group, 2223 uint32_t test_bgn) 2224 { 2225 uint32_t p, d, rank_bgn, sr; 2226 uint32_t dtaps_per_ptap; 2227 uint32_t tmp_delay; 2228 uint32_t bit_chk; 2229 uint32_t grp_calibrated; 2230 uint32_t write_group, write_test_bgn; 2231 uint32_t failed_substage; 2232 2233 debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn); 2234 2235 /* update info for sims */ 2236 reg_file_set_stage(CAL_STAGE_VFIFO); 2237 2238 write_group = read_group; 2239 write_test_bgn = test_bgn; 2240 2241 /* USER Determine number of delay taps for each phase tap */ 2242 dtaps_per_ptap = 0; 2243 tmp_delay = 0; 2244 while (tmp_delay < IO_DELAY_PER_OPA_TAP) { 2245 dtaps_per_ptap++; 2246 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; 2247 } 2248 dtaps_per_ptap--; 2249 tmp_delay = 0; 2250 2251 /* update info for sims */ 2252 reg_file_set_group(read_group); 2253 2254 grp_calibrated = 0; 2255 2256 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); 2257 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; 2258 2259 for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) { 2260 /* 2261 * In RLDRAMX we may be messing the delay of pins in 2262 * the same write group but outside of the current read 2263 * the group, but that's ok because we haven't 2264 * calibrated output side yet. 2265 */ 2266 if (d > 0) { 2267 scc_mgr_apply_group_all_out_delay_add_all_ranks 2268 (write_group, write_test_bgn, d); 2269 } 2270 2271 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; 2272 p++) { 2273 /* set a particular dqdqs phase */ 2274 scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p); 2275 2276 debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \ 2277 p=%u d=%u\n", __func__, __LINE__, 2278 read_group, p, d); 2279 2280 /* 2281 * Load up the patterns used by read calibration 2282 * using current DQDQS phase. 2283 */ 2284 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2285 if (!(gbl->phy_debug_mode_flags & 2286 PHY_DEBUG_DISABLE_GUARANTEED_READ)) { 2287 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks 2288 (read_group, 1, &bit_chk)) { 2289 debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:", 2290 __func__, __LINE__); 2291 debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n", 2292 read_group, p, d); 2293 break; 2294 } 2295 } 2296 2297 /* case:56390 */ 2298 grp_calibrated = 1; 2299 if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay 2300 (write_group, read_group, test_bgn)) { 2301 /* 2302 * USER Read per-bit deskew can be done on a 2303 * per shadow register basis. 2304 */ 2305 for (rank_bgn = 0, sr = 0; 2306 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2307 rank_bgn += NUM_RANKS_PER_SHADOW_REG, 2308 ++sr) { 2309 /* 2310 * Determine if this set of ranks 2311 * should be skipped entirely. 2312 */ 2313 if (!param->skip_shadow_regs[sr]) { 2314 /* 2315 * If doing read after write 2316 * calibration, do not update 2317 * FOM, now - do it then. 2318 */ 2319 if (!rw_mgr_mem_calibrate_vfifo_center 2320 (rank_bgn, write_group, 2321 read_group, test_bgn, 1, 0)) { 2322 grp_calibrated = 0; 2323 failed_substage = 2324 CAL_SUBSTAGE_VFIFO_CENTER; 2325 } 2326 } 2327 } 2328 } else { 2329 grp_calibrated = 0; 2330 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; 2331 } 2332 } 2333 } 2334 2335 if (grp_calibrated == 0) { 2336 set_failing_group_stage(write_group, CAL_STAGE_VFIFO, 2337 failed_substage); 2338 return 0; 2339 } 2340 2341 /* 2342 * Reset the delay chains back to zero if they have moved > 1 2343 * (check for > 1 because loop will increase d even when pass in 2344 * first case). 2345 */ 2346 if (d > 2) 2347 scc_mgr_zero_group(write_group, write_test_bgn, 1); 2348 2349 return 1; 2350 } 2351 2352 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */ 2353 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group, 2354 uint32_t test_bgn) 2355 { 2356 uint32_t rank_bgn, sr; 2357 uint32_t grp_calibrated; 2358 uint32_t write_group; 2359 2360 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn); 2361 2362 /* update info for sims */ 2363 2364 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); 2365 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 2366 2367 write_group = read_group; 2368 2369 /* update info for sims */ 2370 reg_file_set_group(read_group); 2371 2372 grp_calibrated = 1; 2373 /* Read per-bit deskew can be done on a per shadow register basis */ 2374 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2375 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { 2376 /* Determine if this set of ranks should be skipped entirely */ 2377 if (!param->skip_shadow_regs[sr]) { 2378 /* This is the last calibration round, update FOM here */ 2379 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn, 2380 write_group, 2381 read_group, 2382 test_bgn, 0, 2383 1)) { 2384 grp_calibrated = 0; 2385 } 2386 } 2387 } 2388 2389 2390 if (grp_calibrated == 0) { 2391 set_failing_group_stage(write_group, 2392 CAL_STAGE_VFIFO_AFTER_WRITES, 2393 CAL_SUBSTAGE_VFIFO_CENTER); 2394 return 0; 2395 } 2396 2397 return 1; 2398 } 2399 2400 /* Calibrate LFIFO to find smallest read latency */ 2401 static uint32_t rw_mgr_mem_calibrate_lfifo(void) 2402 { 2403 uint32_t found_one; 2404 uint32_t bit_chk; 2405 2406 debug("%s:%d\n", __func__, __LINE__); 2407 2408 /* update info for sims */ 2409 reg_file_set_stage(CAL_STAGE_LFIFO); 2410 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); 2411 2412 /* Load up the patterns used by read calibration for all ranks */ 2413 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2414 found_one = 0; 2415 2416 do { 2417 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2418 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u", 2419 __func__, __LINE__, gbl->curr_read_lat); 2420 2421 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, 2422 NUM_READ_TESTS, 2423 PASS_ALL_BITS, 2424 &bit_chk, 1)) { 2425 break; 2426 } 2427 2428 found_one = 1; 2429 /* reduce read latency and see if things are working */ 2430 /* correctly */ 2431 gbl->curr_read_lat--; 2432 } while (gbl->curr_read_lat > 0); 2433 2434 /* reset the fifos to get pointers to known state */ 2435 2436 writel(0, &phy_mgr_cmd->fifo_reset); 2437 2438 if (found_one) { 2439 /* add a fudge factor to the read latency that was determined */ 2440 gbl->curr_read_lat += 2; 2441 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2442 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \ 2443 read_lat=%u\n", __func__, __LINE__, 2444 gbl->curr_read_lat); 2445 return 1; 2446 } else { 2447 set_failing_group_stage(0xff, CAL_STAGE_LFIFO, 2448 CAL_SUBSTAGE_READ_LATENCY); 2449 2450 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \ 2451 read_lat=%u\n", __func__, __LINE__, 2452 gbl->curr_read_lat); 2453 return 0; 2454 } 2455 } 2456 2457 /* 2458 * issue write test command. 2459 * two variants are provided. one that just tests a write pattern and 2460 * another that tests datamask functionality. 2461 */ 2462 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group, 2463 uint32_t test_dm) 2464 { 2465 uint32_t mcc_instruction; 2466 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) && 2467 ENABLE_SUPER_QUICK_CALIBRATION); 2468 uint32_t rw_wl_nop_cycles; 2469 uint32_t addr; 2470 2471 /* 2472 * Set counter and jump addresses for the right 2473 * number of NOP cycles. 2474 * The number of supported NOP cycles can range from -1 to infinity 2475 * Three different cases are handled: 2476 * 2477 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping 2478 * mechanism will be used to insert the right number of NOPs 2479 * 2480 * 2. For a number of NOP cycles equals to 0, the micro-instruction 2481 * issuing the write command will jump straight to the 2482 * micro-instruction that turns on DQS (for DDRx), or outputs write 2483 * data (for RLD), skipping 2484 * the NOP micro-instruction all together 2485 * 2486 * 3. A number of NOP cycles equal to -1 indicates that DQS must be 2487 * turned on in the same micro-instruction that issues the write 2488 * command. Then we need 2489 * to directly jump to the micro-instruction that sends out the data 2490 * 2491 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters 2492 * (2 and 3). One jump-counter (0) is used to perform multiple 2493 * write-read operations. 2494 * one counter left to issue this command in "multiple-group" mode 2495 */ 2496 2497 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; 2498 2499 if (rw_wl_nop_cycles == -1) { 2500 /* 2501 * CNTR 2 - We want to execute the special write operation that 2502 * turns on DQS right away and then skip directly to the 2503 * instruction that sends out the data. We set the counter to a 2504 * large number so that the jump is always taken. 2505 */ 2506 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2507 2508 /* CNTR 3 - Not used */ 2509 if (test_dm) { 2510 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1; 2511 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA, 2512 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2513 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2514 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2515 } else { 2516 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1; 2517 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA, 2518 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2519 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2520 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2521 } 2522 } else if (rw_wl_nop_cycles == 0) { 2523 /* 2524 * CNTR 2 - We want to skip the NOP operation and go straight 2525 * to the DQS enable instruction. We set the counter to a large 2526 * number so that the jump is always taken. 2527 */ 2528 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2529 2530 /* CNTR 3 - Not used */ 2531 if (test_dm) { 2532 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2533 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS, 2534 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2535 } else { 2536 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2537 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS, 2538 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2539 } 2540 } else { 2541 /* 2542 * CNTR 2 - In this case we want to execute the next instruction 2543 * and NOT take the jump. So we set the counter to 0. The jump 2544 * address doesn't count. 2545 */ 2546 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2); 2547 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2548 2549 /* 2550 * CNTR 3 - Set the nop counter to the number of cycles we 2551 * need to loop for, minus 1. 2552 */ 2553 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); 2554 if (test_dm) { 2555 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2556 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2557 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2558 } else { 2559 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2560 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2561 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2562 } 2563 } 2564 2565 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 2566 RW_MGR_RESET_READ_DATAPATH_OFFSET); 2567 2568 if (quick_write_mode) 2569 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0); 2570 else 2571 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0); 2572 2573 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 2574 2575 /* 2576 * CNTR 1 - This is used to ensure enough time elapses 2577 * for read data to come back. 2578 */ 2579 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); 2580 2581 if (test_dm) { 2582 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT, 2583 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2584 } else { 2585 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT, 2586 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2587 } 2588 2589 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 2590 writel(mcc_instruction, addr + (group << 2)); 2591 } 2592 2593 /* Test writes, can check for a single bit pass or multiple bit pass */ 2594 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, 2595 uint32_t write_group, uint32_t use_dm, uint32_t all_correct, 2596 uint32_t *bit_chk, uint32_t all_ranks) 2597 { 2598 uint32_t r; 2599 uint32_t correct_mask_vg; 2600 uint32_t tmp_bit_chk; 2601 uint32_t vg; 2602 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : 2603 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 2604 uint32_t addr_rw_mgr; 2605 uint32_t base_rw_mgr; 2606 2607 *bit_chk = param->write_correct_mask; 2608 correct_mask_vg = param->write_correct_mask_vg; 2609 2610 for (r = rank_bgn; r < rank_end; r++) { 2611 if (param->skip_ranks[r]) { 2612 /* request to skip the rank */ 2613 continue; 2614 } 2615 2616 /* set rank */ 2617 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 2618 2619 tmp_bit_chk = 0; 2620 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS; 2621 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) { 2622 /* reset the fifos to get pointers to known state */ 2623 writel(0, &phy_mgr_cmd->fifo_reset); 2624 2625 tmp_bit_chk = tmp_bit_chk << 2626 (RW_MGR_MEM_DQ_PER_WRITE_DQS / 2627 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 2628 rw_mgr_mem_calibrate_write_test_issue(write_group * 2629 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg, 2630 use_dm); 2631 2632 base_rw_mgr = readl(addr_rw_mgr); 2633 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr)); 2634 if (vg == 0) 2635 break; 2636 } 2637 *bit_chk &= tmp_bit_chk; 2638 } 2639 2640 if (all_correct) { 2641 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2642 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \ 2643 %u => %lu", write_group, use_dm, 2644 *bit_chk, param->write_correct_mask, 2645 (long unsigned int)(*bit_chk == 2646 param->write_correct_mask)); 2647 return *bit_chk == param->write_correct_mask; 2648 } else { 2649 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2650 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ", 2651 write_group, use_dm, *bit_chk); 2652 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0, 2653 (long unsigned int)(*bit_chk != 0)); 2654 return *bit_chk != 0x00; 2655 } 2656 } 2657 2658 /* 2659 * center all windows. do per-bit-deskew to possibly increase size of 2660 * certain windows. 2661 */ 2662 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, 2663 uint32_t write_group, uint32_t test_bgn) 2664 { 2665 uint32_t i, p, min_index; 2666 int32_t d; 2667 /* 2668 * Store these as signed since there are comparisons with 2669 * signed numbers. 2670 */ 2671 uint32_t bit_chk; 2672 uint32_t sticky_bit_chk; 2673 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2674 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2675 int32_t mid; 2676 int32_t mid_min, orig_mid_min; 2677 int32_t new_dqs, start_dqs, shift_dq; 2678 int32_t dq_margin, dqs_margin, dm_margin; 2679 uint32_t stop; 2680 uint32_t temp_dq_out1_delay; 2681 uint32_t addr; 2682 2683 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn); 2684 2685 dm_margin = 0; 2686 2687 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2688 start_dqs = readl(addr + 2689 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2)); 2690 2691 /* per-bit deskew */ 2692 2693 /* 2694 * set the left and right edge of each bit to an illegal value 2695 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value. 2696 */ 2697 sticky_bit_chk = 0; 2698 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2699 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2700 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2701 } 2702 2703 /* Search for the left edge of the window for each bit */ 2704 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) { 2705 scc_mgr_apply_group_dq_out1_delay(write_group, d); 2706 2707 writel(0, &sdr_scc_mgr->update); 2708 2709 /* 2710 * Stop searching when the read test doesn't pass AND when 2711 * we've seen a passing read on every bit. 2712 */ 2713 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2714 0, PASS_ONE_BIT, &bit_chk, 0); 2715 sticky_bit_chk = sticky_bit_chk | bit_chk; 2716 stop = stop && (sticky_bit_chk == param->write_correct_mask); 2717 debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \ 2718 == %u && %u [bit_chk= %u ]\n", 2719 d, sticky_bit_chk, param->write_correct_mask, 2720 stop, bit_chk); 2721 2722 if (stop == 1) { 2723 break; 2724 } else { 2725 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2726 if (bit_chk & 1) { 2727 /* 2728 * Remember a passing test as the 2729 * left_edge. 2730 */ 2731 left_edge[i] = d; 2732 } else { 2733 /* 2734 * If a left edge has not been seen 2735 * yet, then a future passing test will 2736 * mark this edge as the right edge. 2737 */ 2738 if (left_edge[i] == 2739 IO_IO_OUT1_DELAY_MAX + 1) { 2740 right_edge[i] = -(d + 1); 2741 } 2742 } 2743 debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d); 2744 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d", 2745 (int)(bit_chk & 1), i, left_edge[i]); 2746 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2747 right_edge[i]); 2748 bit_chk = bit_chk >> 1; 2749 } 2750 } 2751 } 2752 2753 /* Reset DQ delay chains to 0 */ 2754 scc_mgr_apply_group_dq_out1_delay(write_group, 0); 2755 sticky_bit_chk = 0; 2756 for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) { 2757 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \ 2758 %d right_edge[%u]: %d\n", __func__, __LINE__, 2759 i, left_edge[i], i, right_edge[i]); 2760 2761 /* 2762 * Check for cases where we haven't found the left edge, 2763 * which makes our assignment of the the right edge invalid. 2764 * Reset it to the illegal value. 2765 */ 2766 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) && 2767 (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { 2768 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; 2769 debug_cond(DLEVEL == 2, "%s:%d write_center: reset \ 2770 right_edge[%u]: %d\n", __func__, __LINE__, 2771 i, right_edge[i]); 2772 } 2773 2774 /* 2775 * Reset sticky bit (except for bits where we have 2776 * seen the left edge). 2777 */ 2778 sticky_bit_chk = sticky_bit_chk << 1; 2779 if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) 2780 sticky_bit_chk = sticky_bit_chk | 1; 2781 2782 if (i == 0) 2783 break; 2784 } 2785 2786 /* Search for the right edge of the window for each bit */ 2787 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) { 2788 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 2789 d + start_dqs); 2790 2791 writel(0, &sdr_scc_mgr->update); 2792 2793 /* 2794 * Stop searching when the read test doesn't pass AND when 2795 * we've seen a passing read on every bit. 2796 */ 2797 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 2798 0, PASS_ONE_BIT, &bit_chk, 0); 2799 2800 sticky_bit_chk = sticky_bit_chk | bit_chk; 2801 stop = stop && (sticky_bit_chk == param->write_correct_mask); 2802 2803 debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \ 2804 %u && %u\n", d, sticky_bit_chk, 2805 param->write_correct_mask, stop); 2806 2807 if (stop == 1) { 2808 if (d == 0) { 2809 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; 2810 i++) { 2811 /* d = 0 failed, but it passed when 2812 testing the left edge, so it must be 2813 marginal, set it to -1 */ 2814 if (right_edge[i] == 2815 IO_IO_OUT1_DELAY_MAX + 1 && 2816 left_edge[i] != 2817 IO_IO_OUT1_DELAY_MAX + 1) { 2818 right_edge[i] = -1; 2819 } 2820 } 2821 } 2822 break; 2823 } else { 2824 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2825 if (bit_chk & 1) { 2826 /* 2827 * Remember a passing test as 2828 * the right_edge. 2829 */ 2830 right_edge[i] = d; 2831 } else { 2832 if (d != 0) { 2833 /* 2834 * If a right edge has not 2835 * been seen yet, then a future 2836 * passing test will mark this 2837 * edge as the left edge. 2838 */ 2839 if (right_edge[i] == 2840 IO_IO_OUT1_DELAY_MAX + 1) 2841 left_edge[i] = -(d + 1); 2842 } else { 2843 /* 2844 * d = 0 failed, but it passed 2845 * when testing the left edge, 2846 * so it must be marginal, set 2847 * it to -1. 2848 */ 2849 if (right_edge[i] == 2850 IO_IO_OUT1_DELAY_MAX + 1 && 2851 left_edge[i] != 2852 IO_IO_OUT1_DELAY_MAX + 1) 2853 right_edge[i] = -1; 2854 /* 2855 * If a right edge has not been 2856 * seen yet, then a future 2857 * passing test will mark this 2858 * edge as the left edge. 2859 */ 2860 else if (right_edge[i] == 2861 IO_IO_OUT1_DELAY_MAX + 2862 1) 2863 left_edge[i] = -(d + 1); 2864 } 2865 } 2866 debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d); 2867 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d", 2868 (int)(bit_chk & 1), i, left_edge[i]); 2869 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2870 right_edge[i]); 2871 bit_chk = bit_chk >> 1; 2872 } 2873 } 2874 } 2875 2876 /* Check that all bits have a window */ 2877 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2878 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \ 2879 %d right_edge[%u]: %d", __func__, __LINE__, 2880 i, left_edge[i], i, right_edge[i]); 2881 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) || 2882 (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) { 2883 set_failing_group_stage(test_bgn + i, 2884 CAL_STAGE_WRITES, 2885 CAL_SUBSTAGE_WRITES_CENTER); 2886 return 0; 2887 } 2888 } 2889 2890 /* Find middle of window for each DQ bit */ 2891 mid_min = left_edge[0] - right_edge[0]; 2892 min_index = 0; 2893 for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 2894 mid = left_edge[i] - right_edge[i]; 2895 if (mid < mid_min) { 2896 mid_min = mid; 2897 min_index = i; 2898 } 2899 } 2900 2901 /* 2902 * -mid_min/2 represents the amount that we need to move DQS. 2903 * If mid_min is odd and positive we'll need to add one to 2904 * make sure the rounding in further calculations is correct 2905 * (always bias to the right), so just add 1 for all positive values. 2906 */ 2907 if (mid_min > 0) 2908 mid_min++; 2909 mid_min = mid_min / 2; 2910 debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__, 2911 __LINE__, mid_min); 2912 2913 /* Determine the amount we can change DQS (which is -mid_min) */ 2914 orig_mid_min = mid_min; 2915 new_dqs = start_dqs; 2916 mid_min = 0; 2917 debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \ 2918 mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min); 2919 /* Initialize data for export structures */ 2920 dqs_margin = IO_IO_OUT1_DELAY_MAX + 1; 2921 dq_margin = IO_IO_OUT1_DELAY_MAX + 1; 2922 2923 /* add delay to bring centre of all DQ windows to the same "level" */ 2924 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { 2925 /* Use values before divide by 2 to reduce round off error */ 2926 shift_dq = (left_edge[i] - right_edge[i] - 2927 (left_edge[min_index] - right_edge[min_index]))/2 + 2928 (orig_mid_min - mid_min); 2929 2930 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \ 2931 [%u]=%d\n", __func__, __LINE__, i, shift_dq); 2932 2933 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2934 temp_dq_out1_delay = readl(addr + (i << 2)); 2935 if (shift_dq + (int32_t)temp_dq_out1_delay > 2936 (int32_t)IO_IO_OUT1_DELAY_MAX) { 2937 shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay; 2938 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) { 2939 shift_dq = -(int32_t)temp_dq_out1_delay; 2940 } 2941 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n", 2942 i, shift_dq); 2943 scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq); 2944 scc_mgr_load_dq(i); 2945 2946 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i, 2947 left_edge[i] - shift_dq + (-mid_min), 2948 right_edge[i] + shift_dq - (-mid_min)); 2949 /* To determine values for export structures */ 2950 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) 2951 dq_margin = left_edge[i] - shift_dq + (-mid_min); 2952 2953 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) 2954 dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2955 } 2956 2957 /* Move DQS */ 2958 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 2959 writel(0, &sdr_scc_mgr->update); 2960 2961 /* Centre DM */ 2962 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__); 2963 2964 /* 2965 * set the left and right edge of each bit to an illegal value, 2966 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value, 2967 */ 2968 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; 2969 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; 2970 int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 2971 int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1; 2972 int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1; 2973 int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1; 2974 int32_t win_best = 0; 2975 2976 /* Search for the/part of the window with DM shift */ 2977 for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) { 2978 scc_mgr_apply_group_dm_out1_delay(write_group, d); 2979 writel(0, &sdr_scc_mgr->update); 2980 2981 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, 2982 PASS_ALL_BITS, &bit_chk, 2983 0)) { 2984 /* USE Set current end of the window */ 2985 end_curr = -d; 2986 /* 2987 * If a starting edge of our window has not been seen 2988 * this is our current start of the DM window. 2989 */ 2990 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) 2991 bgn_curr = -d; 2992 2993 /* 2994 * If current window is bigger than best seen. 2995 * Set best seen to be current window. 2996 */ 2997 if ((end_curr-bgn_curr+1) > win_best) { 2998 win_best = end_curr-bgn_curr+1; 2999 bgn_best = bgn_curr; 3000 end_best = end_curr; 3001 } 3002 } else { 3003 /* We just saw a failing test. Reset temp edge */ 3004 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3005 end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3006 } 3007 } 3008 3009 3010 /* Reset DM delay chains to 0 */ 3011 scc_mgr_apply_group_dm_out1_delay(write_group, 0); 3012 3013 /* 3014 * Check to see if the current window nudges up aganist 0 delay. 3015 * If so we need to continue the search by shifting DQS otherwise DQS 3016 * search begins as a new search. */ 3017 if (end_curr != 0) { 3018 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3019 end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3020 } 3021 3022 /* Search for the/part of the window with DQS shifts */ 3023 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) { 3024 /* 3025 * Note: This only shifts DQS, so are we limiting ourselve to 3026 * width of DQ unnecessarily. 3027 */ 3028 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 3029 d + new_dqs); 3030 3031 writel(0, &sdr_scc_mgr->update); 3032 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, 3033 PASS_ALL_BITS, &bit_chk, 3034 0)) { 3035 /* USE Set current end of the window */ 3036 end_curr = d; 3037 /* 3038 * If a beginning edge of our window has not been seen 3039 * this is our current begin of the DM window. 3040 */ 3041 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) 3042 bgn_curr = d; 3043 3044 /* 3045 * If current window is bigger than best seen. Set best 3046 * seen to be current window. 3047 */ 3048 if ((end_curr-bgn_curr+1) > win_best) { 3049 win_best = end_curr-bgn_curr+1; 3050 bgn_best = bgn_curr; 3051 end_best = end_curr; 3052 } 3053 } else { 3054 /* We just saw a failing test. Reset temp edge */ 3055 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; 3056 end_curr = IO_IO_OUT1_DELAY_MAX + 1; 3057 3058 /* Early exit optimization: if ther remaining delay 3059 chain space is less than already seen largest window 3060 we can exit */ 3061 if ((win_best-1) > 3062 (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) { 3063 break; 3064 } 3065 } 3066 } 3067 3068 /* assign left and right edge for cal and reporting; */ 3069 left_edge[0] = -1*bgn_best; 3070 right_edge[0] = end_best; 3071 3072 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__, 3073 __LINE__, left_edge[0], right_edge[0]); 3074 3075 /* Move DQS (back to orig) */ 3076 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); 3077 3078 /* Move DM */ 3079 3080 /* Find middle of window for the DM bit */ 3081 mid = (left_edge[0] - right_edge[0]) / 2; 3082 3083 /* only move right, since we are not moving DQS/DQ */ 3084 if (mid < 0) 3085 mid = 0; 3086 3087 /* dm_marign should fail if we never find a window */ 3088 if (win_best == 0) 3089 dm_margin = -1; 3090 else 3091 dm_margin = left_edge[0] - mid; 3092 3093 scc_mgr_apply_group_dm_out1_delay(write_group, mid); 3094 writel(0, &sdr_scc_mgr->update); 3095 3096 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \ 3097 dm_margin=%d\n", __func__, __LINE__, left_edge[0], 3098 right_edge[0], mid, dm_margin); 3099 /* Export values */ 3100 gbl->fom_out += dq_margin + dqs_margin; 3101 3102 debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \ 3103 dqs_margin=%d dm_margin=%d\n", __func__, __LINE__, 3104 dq_margin, dqs_margin, dm_margin); 3105 3106 /* 3107 * Do not remove this line as it makes sure all of our 3108 * decisions have been applied. 3109 */ 3110 writel(0, &sdr_scc_mgr->update); 3111 return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0); 3112 } 3113 3114 /* calibrate the write operations */ 3115 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g, 3116 uint32_t test_bgn) 3117 { 3118 /* update info for sims */ 3119 debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn); 3120 3121 reg_file_set_stage(CAL_STAGE_WRITES); 3122 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); 3123 3124 reg_file_set_group(g); 3125 3126 if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) { 3127 set_failing_group_stage(g, CAL_STAGE_WRITES, 3128 CAL_SUBSTAGE_WRITES_CENTER); 3129 return 0; 3130 } 3131 3132 return 1; 3133 } 3134 3135 /* precharge all banks and activate row 0 in bank "000..." and bank "111..." */ 3136 static void mem_precharge_and_activate(void) 3137 { 3138 uint32_t r; 3139 3140 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { 3141 if (param->skip_ranks[r]) { 3142 /* request to skip the rank */ 3143 continue; 3144 } 3145 3146 /* set rank */ 3147 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); 3148 3149 /* precharge all banks ... */ 3150 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3151 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3152 3153 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0); 3154 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1, 3155 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 3156 3157 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1); 3158 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2, 3159 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 3160 3161 /* activate rows */ 3162 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 3163 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 3164 } 3165 } 3166 3167 /* Configure various memory related parameters. */ 3168 static void mem_config(void) 3169 { 3170 uint32_t rlat, wlat; 3171 uint32_t rw_wl_nop_cycles; 3172 uint32_t max_latency; 3173 3174 debug("%s:%d\n", __func__, __LINE__); 3175 /* read in write and read latency */ 3176 wlat = readl(&data_mgr->t_wl_add); 3177 wlat += readl(&data_mgr->mem_t_add); 3178 3179 /* WL for hard phy does not include additive latency */ 3180 3181 /* 3182 * add addtional write latency to offset the address/command extra 3183 * clock cycle. We change the AC mux setting causing AC to be delayed 3184 * by one mem clock cycle. Only do this for DDR3 3185 */ 3186 wlat = wlat + 1; 3187 3188 rlat = readl(&data_mgr->t_rl_add); 3189 3190 rw_wl_nop_cycles = wlat - 2; 3191 gbl->rw_wl_nop_cycles = rw_wl_nop_cycles; 3192 3193 /* 3194 * For AV/CV, lfifo is hardened and always runs at full rate so 3195 * max latency in AFI clocks, used here, is correspondingly smaller. 3196 */ 3197 max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1; 3198 /* configure for a burst length of 8 */ 3199 3200 /* write latency */ 3201 /* Adjust Write Latency for Hard PHY */ 3202 wlat = wlat + 1; 3203 3204 /* set a pretty high read latency initially */ 3205 gbl->curr_read_lat = rlat + 16; 3206 3207 if (gbl->curr_read_lat > max_latency) 3208 gbl->curr_read_lat = max_latency; 3209 3210 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3211 3212 /* advertise write latency */ 3213 gbl->curr_write_lat = wlat; 3214 writel(wlat - 2, &phy_mgr_cfg->afi_wlat); 3215 3216 /* initialize bit slips */ 3217 mem_precharge_and_activate(); 3218 } 3219 3220 /* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */ 3221 static void mem_skip_calibrate(void) 3222 { 3223 uint32_t vfifo_offset; 3224 uint32_t i, j, r; 3225 3226 debug("%s:%d\n", __func__, __LINE__); 3227 /* Need to update every shadow register set used by the interface */ 3228 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; 3229 r += NUM_RANKS_PER_SHADOW_REG) { 3230 /* 3231 * Set output phase alignment settings appropriate for 3232 * skip calibration. 3233 */ 3234 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3235 scc_mgr_set_dqs_en_phase(i, 0); 3236 #if IO_DLL_CHAIN_LENGTH == 6 3237 scc_mgr_set_dqdqs_output_phase(i, 6); 3238 #else 3239 scc_mgr_set_dqdqs_output_phase(i, 7); 3240 #endif 3241 /* 3242 * Case:33398 3243 * 3244 * Write data arrives to the I/O two cycles before write 3245 * latency is reached (720 deg). 3246 * -> due to bit-slip in a/c bus 3247 * -> to allow board skew where dqs is longer than ck 3248 * -> how often can this happen!? 3249 * -> can claim back some ptaps for high freq 3250 * support if we can relax this, but i digress... 3251 * 3252 * The write_clk leads mem_ck by 90 deg 3253 * The minimum ptap of the OPA is 180 deg 3254 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay 3255 * The write_clk is always delayed by 2 ptaps 3256 * 3257 * Hence, to make DQS aligned to CK, we need to delay 3258 * DQS by: 3259 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH)) 3260 * 3261 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH) 3262 * gives us the number of ptaps, which simplies to: 3263 * 3264 * (1.25 * IO_DLL_CHAIN_LENGTH - 2) 3265 */ 3266 scc_mgr_set_dqdqs_output_phase(i, (1.25 * 3267 IO_DLL_CHAIN_LENGTH - 2)); 3268 } 3269 writel(0xff, &sdr_scc_mgr->dqs_ena); 3270 writel(0xff, &sdr_scc_mgr->dqs_io_ena); 3271 3272 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { 3273 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3274 SCC_MGR_GROUP_COUNTER_OFFSET); 3275 } 3276 writel(0xff, &sdr_scc_mgr->dq_ena); 3277 writel(0xff, &sdr_scc_mgr->dm_ena); 3278 writel(0, &sdr_scc_mgr->update); 3279 } 3280 3281 /* Compensate for simulation model behaviour */ 3282 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3283 scc_mgr_set_dqs_bus_in_delay(i, 10); 3284 scc_mgr_load_dqs(i); 3285 } 3286 writel(0, &sdr_scc_mgr->update); 3287 3288 /* 3289 * ArriaV has hard FIFOs that can only be initialized by incrementing 3290 * in sequencer. 3291 */ 3292 vfifo_offset = CALIB_VFIFO_OFFSET; 3293 for (j = 0; j < vfifo_offset; j++) { 3294 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); 3295 } 3296 writel(0, &phy_mgr_cmd->fifo_reset); 3297 3298 /* 3299 * For ACV with hard lfifo, we get the skip-cal setting from 3300 * generation-time constant. 3301 */ 3302 gbl->curr_read_lat = CALIB_LFIFO_OFFSET; 3303 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3304 } 3305 3306 /* Memory calibration entry point */ 3307 static uint32_t mem_calibrate(void) 3308 { 3309 uint32_t i; 3310 uint32_t rank_bgn, sr; 3311 uint32_t write_group, write_test_bgn; 3312 uint32_t read_group, read_test_bgn; 3313 uint32_t run_groups, current_run; 3314 uint32_t failing_groups = 0; 3315 uint32_t group_failed = 0; 3316 uint32_t sr_failed = 0; 3317 3318 debug("%s:%d\n", __func__, __LINE__); 3319 /* Initialize the data settings */ 3320 3321 gbl->error_substage = CAL_SUBSTAGE_NIL; 3322 gbl->error_stage = CAL_STAGE_NIL; 3323 gbl->error_group = 0xff; 3324 gbl->fom_in = 0; 3325 gbl->fom_out = 0; 3326 3327 mem_config(); 3328 3329 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3330 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3331 SCC_MGR_GROUP_COUNTER_OFFSET); 3332 scc_set_bypass_mode(i); 3333 } 3334 3335 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { 3336 /* 3337 * Set VFIFO and LFIFO to instant-on settings in skip 3338 * calibration mode. 3339 */ 3340 mem_skip_calibrate(); 3341 } else { 3342 for (i = 0; i < NUM_CALIB_REPEAT; i++) { 3343 /* 3344 * Zero all delay chain/phase settings for all 3345 * groups and all shadow register sets. 3346 */ 3347 scc_mgr_zero_all(); 3348 3349 run_groups = ~param->skip_groups; 3350 3351 for (write_group = 0, write_test_bgn = 0; write_group 3352 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++, 3353 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) { 3354 /* Initialized the group failure */ 3355 group_failed = 0; 3356 3357 current_run = run_groups & ((1 << 3358 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); 3359 run_groups = run_groups >> 3360 RW_MGR_NUM_DQS_PER_WRITE_GROUP; 3361 3362 if (current_run == 0) 3363 continue; 3364 3365 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | 3366 SCC_MGR_GROUP_COUNTER_OFFSET); 3367 scc_mgr_zero_group(write_group, write_test_bgn, 3368 0); 3369 3370 for (read_group = write_group * 3371 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3372 RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3373 read_test_bgn = 0; 3374 read_group < (write_group + 1) * 3375 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3376 RW_MGR_MEM_IF_WRITE_DQS_WIDTH && 3377 group_failed == 0; 3378 read_group++, read_test_bgn += 3379 RW_MGR_MEM_DQ_PER_READ_DQS) { 3380 /* Calibrate the VFIFO */ 3381 if (!((STATIC_CALIB_STEPS) & 3382 CALIB_SKIP_VFIFO)) { 3383 if (!rw_mgr_mem_calibrate_vfifo 3384 (read_group, 3385 read_test_bgn)) { 3386 group_failed = 1; 3387 3388 if (!(gbl-> 3389 phy_debug_mode_flags & 3390 PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3391 return 0; 3392 } 3393 } 3394 } 3395 } 3396 3397 /* Calibrate the output side */ 3398 if (group_failed == 0) { 3399 for (rank_bgn = 0, sr = 0; rank_bgn 3400 < RW_MGR_MEM_NUMBER_OF_RANKS; 3401 rank_bgn += 3402 NUM_RANKS_PER_SHADOW_REG, 3403 ++sr) { 3404 sr_failed = 0; 3405 if (!((STATIC_CALIB_STEPS) & 3406 CALIB_SKIP_WRITES)) { 3407 if ((STATIC_CALIB_STEPS) 3408 & CALIB_SKIP_DELAY_SWEEPS) { 3409 /* not needed in quick mode! */ 3410 } else { 3411 /* 3412 * Determine if this set of 3413 * ranks should be skipped 3414 * entirely. 3415 */ 3416 if (!param->skip_shadow_regs[sr]) { 3417 if (!rw_mgr_mem_calibrate_writes 3418 (rank_bgn, write_group, 3419 write_test_bgn)) { 3420 sr_failed = 1; 3421 if (!(gbl-> 3422 phy_debug_mode_flags & 3423 PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3424 return 0; 3425 } 3426 } 3427 } 3428 } 3429 } 3430 if (sr_failed != 0) 3431 group_failed = 1; 3432 } 3433 } 3434 3435 if (group_failed == 0) { 3436 for (read_group = write_group * 3437 RW_MGR_MEM_IF_READ_DQS_WIDTH / 3438 RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3439 read_test_bgn = 0; 3440 read_group < (write_group + 1) 3441 * RW_MGR_MEM_IF_READ_DQS_WIDTH 3442 / RW_MGR_MEM_IF_WRITE_DQS_WIDTH && 3443 group_failed == 0; 3444 read_group++, read_test_bgn += 3445 RW_MGR_MEM_DQ_PER_READ_DQS) { 3446 if (!((STATIC_CALIB_STEPS) & 3447 CALIB_SKIP_WRITES)) { 3448 if (!rw_mgr_mem_calibrate_vfifo_end 3449 (read_group, read_test_bgn)) { 3450 group_failed = 1; 3451 3452 if (!(gbl->phy_debug_mode_flags 3453 & PHY_DEBUG_SWEEP_ALL_GROUPS)) { 3454 return 0; 3455 } 3456 } 3457 } 3458 } 3459 } 3460 3461 if (group_failed != 0) 3462 failing_groups++; 3463 } 3464 3465 /* 3466 * USER If there are any failing groups then report 3467 * the failure. 3468 */ 3469 if (failing_groups != 0) 3470 return 0; 3471 3472 /* Calibrate the LFIFO */ 3473 if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) { 3474 /* 3475 * If we're skipping groups as part of debug, 3476 * don't calibrate LFIFO. 3477 */ 3478 if (param->skip_groups == 0) { 3479 if (!rw_mgr_mem_calibrate_lfifo()) 3480 return 0; 3481 } 3482 } 3483 } 3484 } 3485 3486 /* 3487 * Do not remove this line as it makes sure all of our decisions 3488 * have been applied. 3489 */ 3490 writel(0, &sdr_scc_mgr->update); 3491 return 1; 3492 } 3493 3494 static uint32_t run_mem_calibrate(void) 3495 { 3496 uint32_t pass; 3497 uint32_t debug_info; 3498 3499 debug("%s:%d\n", __func__, __LINE__); 3500 3501 /* Reset pass/fail status shown on afi_cal_success/fail */ 3502 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status); 3503 3504 /* stop tracking manger */ 3505 uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg); 3506 3507 writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg); 3508 3509 initialize(); 3510 rw_mgr_mem_initialize(); 3511 3512 pass = mem_calibrate(); 3513 3514 mem_precharge_and_activate(); 3515 writel(0, &phy_mgr_cmd->fifo_reset); 3516 3517 /* 3518 * Handoff: 3519 * Don't return control of the PHY back to AFI when in debug mode. 3520 */ 3521 if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) { 3522 rw_mgr_mem_handoff(); 3523 /* 3524 * In Hard PHY this is a 2-bit control: 3525 * 0: AFI Mux Select 3526 * 1: DDIO Mux Select 3527 */ 3528 writel(0x2, &phy_mgr_cfg->mux_sel); 3529 } 3530 3531 writel(ctrlcfg, &sdr_ctrl->ctrl_cfg); 3532 3533 if (pass) { 3534 printf("%s: CALIBRATION PASSED\n", __FILE__); 3535 3536 gbl->fom_in /= 2; 3537 gbl->fom_out /= 2; 3538 3539 if (gbl->fom_in > 0xff) 3540 gbl->fom_in = 0xff; 3541 3542 if (gbl->fom_out > 0xff) 3543 gbl->fom_out = 0xff; 3544 3545 /* Update the FOM in the register file */ 3546 debug_info = gbl->fom_in; 3547 debug_info |= gbl->fom_out << 8; 3548 writel(debug_info, &sdr_reg_file->fom); 3549 3550 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3551 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status); 3552 } else { 3553 printf("%s: CALIBRATION FAILED\n", __FILE__); 3554 3555 debug_info = gbl->error_stage; 3556 debug_info |= gbl->error_substage << 8; 3557 debug_info |= gbl->error_group << 16; 3558 3559 writel(debug_info, &sdr_reg_file->failing_stage); 3560 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3561 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status); 3562 3563 /* Update the failing group/stage in the register file */ 3564 debug_info = gbl->error_stage; 3565 debug_info |= gbl->error_substage << 8; 3566 debug_info |= gbl->error_group << 16; 3567 writel(debug_info, &sdr_reg_file->failing_stage); 3568 } 3569 3570 return pass; 3571 } 3572 3573 /** 3574 * hc_initialize_rom_data() - Initialize ROM data 3575 * 3576 * Initialize ROM data. 3577 */ 3578 static void hc_initialize_rom_data(void) 3579 { 3580 u32 i, addr; 3581 3582 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET; 3583 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++) 3584 writel(inst_rom_init[i], addr + (i << 2)); 3585 3586 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET; 3587 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++) 3588 writel(ac_rom_init[i], addr + (i << 2)); 3589 } 3590 3591 /** 3592 * initialize_reg_file() - Initialize SDR register file 3593 * 3594 * Initialize SDR register file. 3595 */ 3596 static void initialize_reg_file(void) 3597 { 3598 /* Initialize the register file with the correct data */ 3599 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature); 3600 writel(0, &sdr_reg_file->debug_data_addr); 3601 writel(0, &sdr_reg_file->cur_stage); 3602 writel(0, &sdr_reg_file->fom); 3603 writel(0, &sdr_reg_file->failing_stage); 3604 writel(0, &sdr_reg_file->debug1); 3605 writel(0, &sdr_reg_file->debug2); 3606 } 3607 3608 /** 3609 * initialize_hps_phy() - Initialize HPS PHY 3610 * 3611 * Initialize HPS PHY. 3612 */ 3613 static void initialize_hps_phy(void) 3614 { 3615 uint32_t reg; 3616 /* 3617 * Tracking also gets configured here because it's in the 3618 * same register. 3619 */ 3620 uint32_t trk_sample_count = 7500; 3621 uint32_t trk_long_idle_sample_count = (10 << 16) | 100; 3622 /* 3623 * Format is number of outer loops in the 16 MSB, sample 3624 * count in 16 LSB. 3625 */ 3626 3627 reg = 0; 3628 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); 3629 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); 3630 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); 3631 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); 3632 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); 3633 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); 3634 /* 3635 * This field selects the intrinsic latency to RDATA_EN/FULL path. 3636 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 3637 */ 3638 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); 3639 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET( 3640 trk_sample_count); 3641 writel(reg, &sdr_ctrl->phy_ctrl0); 3642 3643 reg = 0; 3644 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET( 3645 trk_sample_count >> 3646 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); 3647 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET( 3648 trk_long_idle_sample_count); 3649 writel(reg, &sdr_ctrl->phy_ctrl1); 3650 3651 reg = 0; 3652 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET( 3653 trk_long_idle_sample_count >> 3654 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); 3655 writel(reg, &sdr_ctrl->phy_ctrl2); 3656 } 3657 3658 static void initialize_tracking(void) 3659 { 3660 uint32_t concatenated_longidle = 0x0; 3661 uint32_t concatenated_delays = 0x0; 3662 uint32_t concatenated_rw_addr = 0x0; 3663 uint32_t concatenated_refresh = 0x0; 3664 uint32_t trk_sample_count = 7500; 3665 uint32_t dtaps_per_ptap; 3666 uint32_t tmp_delay; 3667 3668 /* 3669 * compute usable version of value in case we skip full 3670 * computation later 3671 */ 3672 dtaps_per_ptap = 0; 3673 tmp_delay = 0; 3674 while (tmp_delay < IO_DELAY_PER_OPA_TAP) { 3675 dtaps_per_ptap++; 3676 tmp_delay += IO_DELAY_PER_DCHAIN_TAP; 3677 } 3678 dtaps_per_ptap--; 3679 3680 concatenated_longidle = concatenated_longidle ^ 10; 3681 /*longidle outer loop */ 3682 concatenated_longidle = concatenated_longidle << 16; 3683 concatenated_longidle = concatenated_longidle ^ 100; 3684 /*longidle sample count */ 3685 concatenated_delays = concatenated_delays ^ 243; 3686 /* trfc, worst case of 933Mhz 4Gb */ 3687 concatenated_delays = concatenated_delays << 8; 3688 concatenated_delays = concatenated_delays ^ 14; 3689 /* trcd, worst case */ 3690 concatenated_delays = concatenated_delays << 8; 3691 concatenated_delays = concatenated_delays ^ 10; 3692 /* vfifo wait */ 3693 concatenated_delays = concatenated_delays << 8; 3694 concatenated_delays = concatenated_delays ^ 4; 3695 /* mux delay */ 3696 3697 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE; 3698 concatenated_rw_addr = concatenated_rw_addr << 8; 3699 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1; 3700 concatenated_rw_addr = concatenated_rw_addr << 8; 3701 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ; 3702 concatenated_rw_addr = concatenated_rw_addr << 8; 3703 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL; 3704 3705 concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL; 3706 concatenated_refresh = concatenated_refresh << 24; 3707 concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */ 3708 3709 /* Initialize the register file with the correct data */ 3710 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap); 3711 writel(trk_sample_count, &sdr_reg_file->trk_sample_count); 3712 writel(concatenated_longidle, &sdr_reg_file->trk_longidle); 3713 writel(concatenated_delays, &sdr_reg_file->delays); 3714 writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr); 3715 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width); 3716 writel(concatenated_refresh, &sdr_reg_file->trk_rfsh); 3717 } 3718 3719 int sdram_calibration_full(void) 3720 { 3721 struct param_type my_param; 3722 struct gbl_type my_gbl; 3723 uint32_t pass; 3724 uint32_t i; 3725 3726 param = &my_param; 3727 gbl = &my_gbl; 3728 3729 /* Initialize the debug mode flags */ 3730 gbl->phy_debug_mode_flags = 0; 3731 /* Set the calibration enabled by default */ 3732 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; 3733 /* 3734 * Only sweep all groups (regardless of fail state) by default 3735 * Set enabled read test by default. 3736 */ 3737 #if DISABLE_GUARANTEED_READ 3738 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; 3739 #endif 3740 /* Initialize the register file */ 3741 initialize_reg_file(); 3742 3743 /* Initialize any PHY CSR */ 3744 initialize_hps_phy(); 3745 3746 scc_mgr_initialize(); 3747 3748 initialize_tracking(); 3749 3750 /* USER Enable all ranks, groups */ 3751 for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++) 3752 param->skip_ranks[i] = 0; 3753 for (i = 0; i < NUM_SHADOW_REGS; ++i) 3754 param->skip_shadow_regs[i] = 0; 3755 param->skip_groups = 0; 3756 3757 printf("%s: Preparing to start memory calibration\n", __FILE__); 3758 3759 debug("%s:%d\n", __func__, __LINE__); 3760 debug_cond(DLEVEL == 1, 3761 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", 3762 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM, 3763 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS, 3764 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS, 3765 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 3766 debug_cond(DLEVEL == 1, 3767 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", 3768 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3769 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH, 3770 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP); 3771 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u", 3772 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH); 3773 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", 3774 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX, 3775 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX); 3776 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", 3777 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX, 3778 IO_IO_OUT2_DELAY_MAX); 3779 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", 3780 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE); 3781 3782 hc_initialize_rom_data(); 3783 3784 /* update info for sims */ 3785 reg_file_set_stage(CAL_STAGE_NIL); 3786 reg_file_set_group(0); 3787 3788 /* 3789 * Load global needed for those actions that require 3790 * some dynamic calibration support. 3791 */ 3792 dyn_calib_steps = STATIC_CALIB_STEPS; 3793 /* 3794 * Load global to allow dynamic selection of delay loop settings 3795 * based on calibration mode. 3796 */ 3797 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) 3798 skip_delay_mask = 0xff; 3799 else 3800 skip_delay_mask = 0x0; 3801 3802 pass = run_mem_calibrate(); 3803 3804 printf("%s: Calibration complete\n", __FILE__); 3805 return pass; 3806 } 3807