/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode,
 * where we skip everything except the bare minimum, we need a few of the
 * steps to be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS
 * for the check, which is based on the rtl-supplied value, or we dynamically
 * compute the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
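/*
 * Illustrative example (mask values assumed, not taken from any particular
 * configuration): with skip_delay_mask = 0xffff,
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) evaluates to 0x6a, while with
 * skip_delay_mask = 0 it evaluates to 0, so every delay-loop counter
 * collapses to zero in skip mode without any run-time branching.
 */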
struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER: memory clock is not stable, we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER: calibration status all set to zero. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
	ratio = RW_MGR_MEM_DATA_WIDTH /
		RW_MGR_MEM_DATA_MASK_WIDTH;
	param->dm_correct_mask = (1 << ratio) - 1;
}
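/*
 * Worked example (widths assumed for illustration; the RW_MGR_* values
 * come from the build-time configuration): with
 * RW_MGR_MEM_DQ_PER_READ_DQS = 8 and
 * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS = 1, ratio = 8 and
 * read_correct_mask_vg = (1 << 8) - 1 = 0xff, i.e. one mask bit per DQ
 * pin checked in each virtual group.
 */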
/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}
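	/*
	 * The word below packs the chip-selects into bits [7:0] (the
	 * active rank's bit is cleared), read ODT into bits [15:8] and
	 * write ODT into bits [23:16]. Illustrative example for rank 1
	 * of 4: CS field = 0xfd, odt_mask_0 = 0x8, odt_mask_1 = 0xa,
	 * so the register is written with 0x0a08fd.
	 */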
	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
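/*
 * Illustrative addressing (the offsets themselves are defined in the
 * sequencer headers): setting group 3 writes to
 * SDR_PHYGRP_SCCGRP_ADDRESS | off | (3 << 2), so each group occupies one
 * 32-bit word within the register bank selected by @off.
 */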
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * USER: although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * USER: although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
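	/*
	 * Illustrative sanity check of the packing above: the set fields
	 * OR together to (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27.
	 */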
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER: Zero all DQS config settings, across all groups and all
	 * shadow registers.
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}
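/*
 * Illustrative mapping (interface widths assumed, they come from the
 * build-time configuration): with RW_MGR_MEM_IF_READ_DQS_WIDTH = 4 and
 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 2, ratio = 2, so write group 1 covers
 * read groups 2 and 3 and the DQS enable above is written once for each
 * of them.
 */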
/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
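/*
 * Illustrative overflow handling (tap counts assumed): if
 * READ_SCC_DQS_IO_OUT2_DELAY + delay = 10 while IO_IO_OUT2_DELAY_MAX = 7,
 * the OUT2 chain saturates and the remaining 3 taps are programmed into
 * the OUT1 chain instead, so the total added output delay is preserved.
 */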
/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom; could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
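	/*
	 * Worked example (illustrative; AFI_RATE_RATIO = 2 assumed for a
	 * half-rate interface): clocks = 1536 gives afi_clocks = 768 =
	 * 0x300, which selects inner = 0xff, outer = (0x300 - 1) >> 8 = 2
	 * and c_loop = 0, i.e. nested loops and a single run of the
	 * sequence.
	 */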
start\n", __func__, __LINE__, clocks); 773 774 775 afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO; 776 /* scale (rounding up) to get afi clocks */ 777 778 /* 779 * Note, we don't bother accounting for being off a little bit 780 * because of a few extra instructions in outer loops 781 * Note, the loops have a test at the end, and do the test before 782 * the decrement, and so always perform the loop 783 * 1 time more than the counter value 784 */ 785 if (afi_clocks == 0) { 786 ; 787 } else if (afi_clocks <= 0x100) { 788 inner = afi_clocks-1; 789 outer = 0; 790 c_loop = 0; 791 } else if (afi_clocks <= 0x10000) { 792 inner = 0xff; 793 outer = (afi_clocks-1) >> 8; 794 c_loop = 0; 795 } else { 796 inner = 0xff; 797 outer = 0xff; 798 c_loop = (afi_clocks-1) >> 16; 799 } 800 801 /* 802 * rom instructions are structured as follows: 803 * 804 * IDLE_LOOP2: jnz cntr0, TARGET_A 805 * IDLE_LOOP1: jnz cntr1, TARGET_B 806 * return 807 * 808 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and 809 * TARGET_B is set to IDLE_LOOP2 as well 810 * 811 * if we have no outer loop, though, then we can use IDLE_LOOP1 only, 812 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely 813 * 814 * a little confusing, but it helps save precious space in the inst_rom 815 * and sequencer rom and keeps the delays more accurate and reduces 816 * overhead 817 */ 818 if (afi_clocks <= 0x100) { 819 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), 820 &sdr_rw_load_mgr_regs->load_cntr1); 821 822 writel(RW_MGR_IDLE_LOOP1, 823 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 824 825 writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS | 826 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 827 } else { 828 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), 829 &sdr_rw_load_mgr_regs->load_cntr0); 830 831 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), 832 &sdr_rw_load_mgr_regs->load_cntr1); 833 834 writel(RW_MGR_IDLE_LOOP2, 835 &sdr_rw_load_jump_mgr_regs->load_jump_add0); 836 837 writel(RW_MGR_IDLE_LOOP2, 838 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 839 840 /* hack to get around compiler not being smart enough */ 841 if (afi_clocks <= 0x10000) { 842 /* only need to run once */ 843 writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS | 844 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 845 } else { 846 do { 847 writel(RW_MGR_IDLE_LOOP2, 848 SDR_PHYGRP_RWMGRGRP_ADDRESS | 849 RW_MGR_RUN_SINGLE_GROUP_OFFSET); 850 } while (c_loop-- != 0); 851 } 852 } 853 debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks); 854 } 855 856 /** 857 * rw_mgr_mem_init_load_regs() - Load instruction registers 858 * @cntr0: Counter 0 value 859 * @cntr1: Counter 1 value 860 * @cntr2: Counter 2 value 861 * @jump: Jump instruction value 862 * 863 * Load instruction registers. 
/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * USER: Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
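		/*
		 * Illustrative: RW_MGR_MEM_ADDRESS_MIRRORING is a per-rank
		 * bitmask, so a value of 0xA (assumed here) would select the
		 * mirrored MRS variants for ranks 1 and 3 only.
		 */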
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
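	/*
	 * Quick check of the suggested solution (illustrative):
	 * ((2 + 0) * 256 + 2) * 106 = 514 * 106 = 54484 cycles, just
	 * above the ~54000 cycles required for tINIT.
	 */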
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * Transition the RESET to high.
	 * Wait for 500us.
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}

/*
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * USER: need to wait tMOD (12CK or 15ns) time before issuing
	 * other commands, but we will have plenty of NIOS cycles before
	 * actual handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @all_ranks:	Test all ranks
 *
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	const u32 addr_offset =
		(group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	const u32 correct_mask_vg = param->read_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
	int vg, r;
	int ret = 0;

	bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
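		/*
		 * Illustrative accumulation (assuming 2 virtual groups of
		 * 4 DQ each): each iteration below shifts the previous VG
		 * result left by shift_ratio = 4 and ORs in the new one,
		 * so after the loop tmp_bit_chk holds one pass/fail bit
		 * per DQ pin of the whole group.
		 */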
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
			writel(RW_MGR_GUARANTEED_READ,
			       addr + addr_offset + (vg << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
		}

		bit_chk &= tmp_bit_chk;
	}

	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (bit_chk != param->read_correct_mask)
		ret = -EIO;

	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
		   __func__, __LINE__, group, bit_chk,
		   param->read_correct_mask, ret);

	return ret;
}
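/*
 * Note on the pass criterion above: bit_chk starts as the full
 * read_correct_mask and is ANDed with each tested rank's result, so a
 * single DQ bit failing on any rank is enough to make the final
 * comparison fail and return -EIO.
 */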
/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
			RW_MGR_MEM_NUMBER_OF_RANKS :
			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/**
 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
 * @rank_bgn:	Rank number
 * @group:	Read/Write group
 * @num_tries:	Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @bit_chk:	Resulting bit mask after the test
 * @all_groups:	Test all R/W groups
 * @all_ranks:	Test all ranks
 *
 * Try a read and see if it returns correct data back. Test has dummy reads
 * inserted into the mix used to align DQS enable. Test has more thorough
 * checks than the regular read test.
 */
static int
rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
			       const u32 num_tries, const u32 all_correct,
			       u32 *bit_chk,
			       const u32 all_groups, const u32 all_ranks)
{
	const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 quick_read_mode =
		((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
		 ENABLE_SUPER_QUICK_CALIBRATION);
	u32 correct_mask_vg = param->read_correct_mask_vg;
	u32 tmp_bit_chk;
	u32 base_rw_mgr;
	u32 addr;

	int r, vg, ret;

	*bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
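		/*
		 * Per the end-of-loop counter semantics described above
		 * delay_for_n_mem_clocks(), a counter value of 0x32 yields
		 * 0x33 (51) back-to-back reads, while quick_read_mode cuts
		 * this down to the minimal two (1+1) reads.
		 */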
		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
		     vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			if (all_groups) {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			} else {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			}

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
					RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
			tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
		}

		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (all_correct) {
		ret = (*bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask, ret);
	} else {
		ret = (*bit_chk != 0x00);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   0, ret);
	}

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
 * @grp:	Read/Write group
 * @num_tries:	Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @all_groups:	Test all R/W groups
 *
 * Perform a READ test across all memory ranks.
 */
static int
rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
					 const u32 all_correct,
					 const u32 all_groups)
{
	u32 bit_chk;

	return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
					      &bit_chk, all_groups, 1);
}

/**
 * rw_mgr_incr_vfifo() - Increase VFIFO value
 * @grp:	Read/Write group
 *
 * Increase VFIFO value.
 */
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

/**
 * rw_mgr_decr_vfifo() - Decrease VFIFO value
 * @grp:	Read/Write group
 *
 * Decrease VFIFO value.
 */
static void rw_mgr_decr_vfifo(const u32 grp)
{
	u32 i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp);
}
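/*
 * Illustrative: the VFIFO pointer wraps modulo VFIFO_SIZE and only an
 * increment command is available, so with VFIFO_SIZE = 8 (assumed here)
 * issuing 7 increments is equivalent to a single decrement.
 */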
/**
 * find_vfifo_failing_read() - Push VFIFO to get a failing read
 * @grp:	Read/Write group
 *
 * Push VFIFO until a failing read happens.
 */
static int find_vfifo_failing_read(const u32 grp)
{
	u32 v, ret, fail_cnt = 0;

	for (v = 0; v < VFIFO_SIZE; v++) {
		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (!ret) {
			fail_cnt++;

			if (fail_cnt == 2)
				return v;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	/* No failing read found! Something must have gone wrong. */
	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}

/**
 * sdr_find_phase_delay() - Find DQS enable phase or delay
 * @working:	If 1, look for working phase/delay, if 0, look for non-working
 * @delay:	If 1, look for delay, if 0, look for phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @work_inc:	Working window increment
 * @pd:		DQS Phase/Delay Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase_delay(int working, int delay, const u32 grp,
				u32 *work, const u32 work_inc, u32 *pd)
{
	const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
	u32 ret;

	for (; *pd <= max; (*pd)++) {
		if (delay)
			scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
		else
			scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (!working)
			ret = !ret;

		if (ret)
			return 0;

		if (work)
			*work += work_inc;
	}

	return -EINVAL;
}
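/*
 * Illustrative use: sdr_find_phase() below calls
 * sdr_find_phase_delay(working, 0, grp, work, IO_DELAY_PER_OPA_TAP, p),
 * sweeping the DQS enable phase upward from *p; it returns 0 at the first
 * setting matching the @working criterion, leaving *pd at that setting
 * and *work advanced by one phase-tap delay for each rejected step.
 */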
/**
 * sdr_find_phase() - Find DQS enable phase
 * @working:	If 1, look for working phase, if 0, look for non-working phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @i:		Iterator
 * @p:		DQS Phase Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);
	int ret;

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		ret = sdr_find_phase_delay(working, 0, grp, work,
					   IO_DELAY_PER_OPA_TAP, p);
		if (!ret)
			return 0;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}

/**
 * sdr_working_phase() - Find working DQS enable phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @d:		dtaps output value
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find working DQS enable phase setting.
 */
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
		ret = sdr_find_phase(1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);
	return -EINVAL;
}

/**
 * sdr_backup_phase() - Find DQS enable backup phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @p:		DQS Phase Iterator
 *
 * Find DQS enable backup phase setting.
 */
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (ret) {
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}

/**
 * sdr_nonworking_phase() - Find non-working DQS enable phase
 * @grp:	Read/Write group
 * @work_end:	Working window end position
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find non-working DQS enable phase setting.
 */
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}

/**
 * sdr_find_window_center() - Find center of the working DQS window.
 * @grp:	Read/Write group
 * @work_bgn:	First working settings
 * @work_end:	Last working settings
 *
 * Find center of the working DQS enable window.
 */
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 work_mid;
	int tmp_delay = 0;
	int i, p, d;

	work_mid = (work_bgn + work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	work_mid %= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);

	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
	p = tmp_delay / IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);

	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
	if (d > IO_DQS_EN_DELAY_MAX)
		d = IO_DQS_EN_DELAY_MAX;
	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
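	/*
	 * Worked example (tap delays assumed): with IO_DELAY_PER_OPA_TAP
	 * = 10 and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 3, work_mid = 37
	 * rounds down to tmp_delay = 30, so p = 3 and
	 * d = DIV_ROUND_UP(7, 3) = 3, i.e. the chosen setting lands at
	 * 30 + 9 = 39, slightly past the window middle since the dtap
	 * count is rounded up.
	 */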
	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

	/*
	 * Push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     0)) {
			debug_cond(DLEVEL == 2,
				   "%s:%d center: found: ptap=%u dtap=%u\n",
				   __func__, __LINE__, p, d);
			return 0;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
		   __func__, __LINE__);
	return -EINVAL;
}

/**
 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
 * @grp:	Read/Write Group
 *
 * Find a good DQS enable to use.
 */
static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
{
	u32 d, p, i;
	u32 dtaps_per_ptap;
	u32 work_bgn, work_end;
	u32 found_passing_read, found_failing_read, initial_failing_dtap;
	int ret;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* Step 0: Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* Step 1: First push vfifo until we get a failing read. */
	find_vfifo_failing_read(grp);

	/* Step 2: Find first working phase, increment in ptaps. */
	work_bgn = 0;
	ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
	if (ret)
		return ret;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure. Otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/*
		 * Step 3a: If we have room, back off by one and
		 * increment in dtaps.
		 */
		sdr_backup_phase(grp, &work_bgn, &p);

		/*
		 * Step 4a: go forward from working phase to non-working
		 * phase, increment in ptaps.
		 */
		ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
		if (ret)
			return ret;

		/* Step 5a: Back off one from last, increment in dtaps. */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
			   __func__, __LINE__, p);
	}

	/* The dtap increment to find the failing edge is done here. */
	sdr_find_phase_delay(0, 1, grp, &work_end,
			     IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2,
		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
			   __func__, __LINE__);
		return -EINVAL;
	}

	debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/*
	 * We need to calculate the number of dtaps that equal a ptap.
	 * To do that we'll back up a ptap and re-find the edge of the
	 * window using dtaps.
	 */
	debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
		debug_cond(DLEVEL == 2, "%s:%d backed-up cycle/phase: p=%u\n",
			   __func__, __LINE__, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d backed-up phase only: p=%u\n",
			   __func__, __LINE__, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */

	/* Find a passing read. */
	debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
		   __func__, __LINE__);

	initial_failing_dtap = d;

	found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
	if (found_passing_read) {
		/* Find a failing read. */
		debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
			   __func__, __LINE__);
		d++;
		found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
							   &d);
	} else {
		debug_cond(DLEVEL == 1,
			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
			   __func__, __LINE__);
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u\n",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
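	/*
	 * Illustrative run of the tracking measurement above: if the
	 * failing edge was first seen at dtap 2 and, after backing up one
	 * ptap, the passing-then-failing sweep ends at dtap 9, then
	 * dtaps_per_ptap = 9 - 2 = 7 delay taps per phase tap.
	 */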
	/* Step 6: Find the centre of the window. */
	ret = sdr_find_window_center(grp, work_bgn, work_end);

	return ret;
}

/**
 * search_stop_check() - Check if the detected edge is valid
 * @write:	Perform read (Stage 2) or write (Stage 3) calibration
 * @d:		DQS delay
 * @rank_bgn:	Rank number
 * @write_group:	Write Group
 * @read_group:	Read Group
 * @bit_chk:	Resulting bit mask after the test
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @use_read_test:	Perform read test
 *
 * Test if the found edge is valid.
 */
static u32 search_stop_check(const int write, const int d, const int rank_bgn,
			     const u32 write_group, const u32 read_group,
			     u32 *bit_chk, u32 *sticky_bit_chk,
			     const u32 use_read_test)
{
	const u32 ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const u32 correct_mask = write ? param->write_correct_mask :
					 param->read_correct_mask;
	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
				    RW_MGR_MEM_DQ_PER_READ_DQS;
	u32 ret;

	/*
	 * Stop searching when the read test doesn't pass AND when
	 * we've seen a passing read on every bit.
	 */
	if (write) {			/* WRITE-ONLY */
		ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
						       0, PASS_ONE_BIT,
						       bit_chk, 0);
	} else if (use_read_test) {	/* READ-ONLY */
		ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
						      NUM_READ_PB_TESTS,
						      PASS_ONE_BIT, bit_chk,
						      0, 0);
	} else {			/* READ-ONLY */
		rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
						PASS_ONE_BIT, bit_chk, 0);
		*bit_chk = *bit_chk >> (per_dqs *
			(read_group - (write_group * ratio)));
		ret = (*bit_chk == 0);
	}
	*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
	ret = ret && (*sticky_bit_chk == correct_mask);
	debug_cond(DLEVEL == 2,
		   "%s:%d center(left): dtap=%u => %u == %u && %u\n",
		   __func__, __LINE__, d,
		   *sticky_bit_chk, correct_mask, ret);
	return ret;
}
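/*
 * Illustrative remap for the write-test-based read check above (widths
 * assumed): with 4 read groups per 2 write groups, ratio = 2, so read
 * group 3 inside write group 1 selects the per_dqs-wide slice starting
 * at bit (3 - 1 * 2) * per_dqs of the write-test result.
 */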
/**
 * search_left_edge() - Find left edge of DQ/DQS working phase
 * @write:	Perform read (Stage 2) or write (Stage 3) calibration
 * @rank_bgn:	Rank number
 * @write_group:	Write Group
 * @read_group:	Read Group
 * @test_bgn:	First DQ pin of the group to begin the test on
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @left_edge:	Left edge of the DQ/DQS phase
 * @right_edge:	Right edge of the DQ/DQS phase
 * @use_read_test:	Perform read test
 *
 * Find left edge of DQ/DQS working phase.
 */
static void search_left_edge(const int write, const int rank_bgn,
	const u32 write_group, const u32 read_group, const u32 test_bgn,
	u32 *sticky_bit_chk,
	int *left_edge, int *right_edge, const u32 use_read_test)
{
	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
				    RW_MGR_MEM_DQ_PER_READ_DQS;
	u32 stop, bit_chk;
	int i, d;

	for (d = 0; d <= dqs_max; d++) {
		if (write)
			scc_mgr_apply_group_dq_out1_delay(d);
		else
			scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		stop = search_stop_check(write, d, rank_bgn, write_group,
					 read_group, &bit_chk, sticky_bit_chk,
					 use_read_test);
		if (stop == 1)
			break;

		/* stop != 1 */
		for (i = 0; i < per_dqs; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as
				 * the left_edge.
				 */
				left_edge[i] = d;
			} else {
				/*
				 * If a left edge has not been seen
				 * yet, then a future passing test
				 * will mark this edge as the right
				 * edge.
				 */
				if (left_edge[i] == delay_max + 1)
					right_edge[i] = -(d + 1);
			}
			bit_chk >>= 1;
		}
	}

	/* Reset DQ delay chains to 0 */
	if (write)
		scc_mgr_apply_group_dq_out1_delay(0);
	else
		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);

	*sticky_bit_chk = 0;
	for (i = per_dqs - 1; i >= 0; i--) {
		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i],
			   i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == delay_max + 1) &&
		    (right_edge[i] != delay_max + 1)) {
			right_edge[i] = delay_max + 1;
			debug_cond(DLEVEL == 2,
				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit
		 * READ: except for bits where we have seen both
		 *       the left and right edge.
		 * WRITE: except for bits where we have seen the
		 *        left edge.
		 */
		*sticky_bit_chk <<= 1;
		if (write) {
			if (left_edge[i] != delay_max + 1)
				*sticky_bit_chk |= 1;
		} else {
			if ((left_edge[i] != delay_max + 1) &&
			    (right_edge[i] != delay_max + 1))
				*sticky_bit_chk |= 1;
		}
	}
}
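/*
 * Illustrative edge encoding (delay values assumed): if bit i is still
 * failing at d = 2 and has never passed, right_edge[i] is provisionally
 * stored as -(2 + 1) = -3, recording where the failures ended so that,
 * as the comments above describe, a later passing test can treat that
 * boundary as the right edge once the left edge is known.
 */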
RW_MGR_MEM_DQ_PER_WRITE_DQS : 1970 RW_MGR_MEM_DQ_PER_READ_DQS; 1971 u32 stop, bit_chk; 1972 int i, d; 1973 1974 for (d = 0; d <= dqs_max - start_dqs; d++) { 1975 if (write) { /* WRITE-ONLY */ 1976 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, 1977 d + start_dqs); 1978 } else { /* READ-ONLY */ 1979 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); 1980 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { 1981 uint32_t delay = d + start_dqs_en; 1982 if (delay > IO_DQS_EN_DELAY_MAX) 1983 delay = IO_DQS_EN_DELAY_MAX; 1984 scc_mgr_set_dqs_en_delay(read_group, delay); 1985 } 1986 scc_mgr_load_dqs(read_group); 1987 } 1988 1989 writel(0, &sdr_scc_mgr->update); 1990 1991 stop = search_stop_check(write, d, rank_bgn, write_group, 1992 read_group, &bit_chk, sticky_bit_chk, 1993 use_read_test); 1994 if (stop == 1) { 1995 if (write && (d == 0)) { /* WRITE-ONLY */ 1996 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { 1997 /* 1998 * d = 0 failed, but it passed when 1999 * testing the left edge, so it must be 2000 * marginal, set it to -1 2001 */ 2002 if (right_edge[i] == delay_max + 1 && 2003 left_edge[i] != delay_max + 1) 2004 right_edge[i] = -1; 2005 } 2006 } 2007 break; 2008 } 2009 2010 /* stop != 1 */ 2011 for (i = 0; i < per_dqs; i++) { 2012 if (bit_chk & 1) { 2013 /* 2014 * Remember a passing test as 2015 * the right_edge. 2016 */ 2017 right_edge[i] = d; 2018 } else { 2019 if (d != 0) { 2020 /* 2021 * If a right edge has not 2022 * been seen yet, then a future 2023 * passing test will mark this 2024 * edge as the left edge. 2025 */ 2026 if (right_edge[i] == delay_max + 1) 2027 left_edge[i] = -(d + 1); 2028 } else { 2029 /* 2030 * d = 0 failed, but it passed 2031 * when testing the left edge, 2032 * so it must be marginal, set 2033 * it to -1 2034 */ 2035 if (right_edge[i] == delay_max + 1 && 2036 left_edge[i] != delay_max + 1) 2037 right_edge[i] = -1; 2038 /* 2039 * If a right edge has not been 2040 * seen yet, then a future 2041 * passing test will mark this 2042 * edge as the left edge. 2043 */ 2044 else if (right_edge[i] == delay_max + 1) 2045 left_edge[i] = -(d + 1); 2046 } 2047 } 2048 2049 debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ", 2050 __func__, __LINE__, d); 2051 debug_cond(DLEVEL == 2, 2052 "bit_chk_test=%i left_edge[%u]: %d ", 2053 bit_chk & 1, i, left_edge[i]); 2054 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i, 2055 right_edge[i]); 2056 bit_chk >>= 1; 2057 } 2058 } 2059 2060 /* Check that all bits have a window */ 2061 for (i = 0; i < per_dqs; i++) { 2062 debug_cond(DLEVEL == 2, 2063 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d", 2064 __func__, __LINE__, i, left_edge[i], 2065 i, right_edge[i]); 2066 if ((left_edge[i] == dqs_max + 1) || 2067 (right_edge[i] == dqs_max + 1)) 2068 return i + 1; /* FIXME: If we fail, retval > 0 */ 2069 } 2070 2071 return 0; 2072 } 2073 2074 /** 2075 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase 2076 * @write: Perform read (Stage 2) or write (Stage 3) calibration 2077 * @left_edge: Left edge of the DQ/DQS phase 2078 * @right_edge: Right edge of the DQ/DQS phase 2079 * @mid_min: Best DQ/DQS phase middle setting 2080 * 2081 * Find index and value of the middle of the DQ/DQS working phase. 2082 */ 2083 static int get_window_mid_index(const int write, int *left_edge, 2084 int *right_edge, int *mid_min) 2085 { 2086 const u32 per_dqs = write ? 
RW_MGR_MEM_DQ_PER_WRITE_DQS : 2087 RW_MGR_MEM_DQ_PER_READ_DQS; 2088 int i, mid, min_index; 2089 2090 /* Find middle of window for each DQ bit */ 2091 *mid_min = left_edge[0] - right_edge[0]; 2092 min_index = 0; 2093 for (i = 1; i < per_dqs; i++) { 2094 mid = left_edge[i] - right_edge[i]; 2095 if (mid < *mid_min) { 2096 *mid_min = mid; 2097 min_index = i; 2098 } 2099 } 2100 2101 /* 2102 * -mid_min/2 represents the amount that we need to move DQS. 2103 * If mid_min is odd and positive we'll need to add one to make 2104 * sure the rounding in further calculations is correct (always 2105 * bias to the right), so just add 1 for all positive values. 2106 */ 2107 if (*mid_min > 0) 2108 (*mid_min)++; 2109 *mid_min = *mid_min / 2; 2110 2111 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n", 2112 __func__, __LINE__, *mid_min, min_index); 2113 return min_index; 2114 } 2115 2116 /** 2117 * center_dq_windows() - Center the DQ/DQS windows 2118 * @write: Perform read (Stage 2) or write (Stage 3) calibration 2119 * @left_edge: Left edge of the DQ/DQS phase 2120 * @right_edge: Right edge of the DQ/DQS phase 2121 * @mid_min: Adjusted DQ/DQS phase middle setting 2122 * @orig_mid_min: Original DQ/DQS phase middle setting 2123 * @min_index: DQ/DQS phase middle setting index 2124 * @test_bgn: Rank number to begin the test 2125 * @dq_margin: Amount of shift for the DQ 2126 * @dqs_margin: Amount of shift for the DQS 2127 * 2128 * Align the DQ/DQS windows in each group. 2129 */ 2130 static void center_dq_windows(const int write, int *left_edge, int *right_edge, 2131 const int mid_min, const int orig_mid_min, 2132 const int min_index, const int test_bgn, 2133 int *dq_margin, int *dqs_margin) 2134 { 2135 const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX; 2136 const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS : 2137 RW_MGR_MEM_DQ_PER_READ_DQS; 2138 const u32 delay_off = write ? 
SCC_MGR_IO_OUT1_DELAY_OFFSET : 2139 SCC_MGR_IO_IN_DELAY_OFFSET; 2140 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off; 2141 2142 u32 temp_dq_io_delay1, temp_dq_io_delay2; 2143 int shift_dq, i, p; 2144 2145 /* Initialize data for export structures */ 2146 *dqs_margin = delay_max + 1; 2147 *dq_margin = delay_max + 1; 2148 2149 /* add delay to bring centre of all DQ windows to the same "level" */ 2150 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) { 2151 /* Use values before divide by 2 to reduce round off error */ 2152 shift_dq = (left_edge[i] - right_edge[i] - 2153 (left_edge[min_index] - right_edge[min_index]))/2 + 2154 (orig_mid_min - mid_min); 2155 2156 debug_cond(DLEVEL == 2, 2157 "vfifo_center: before: shift_dq[%u]=%d\n", 2158 i, shift_dq); 2159 2160 temp_dq_io_delay1 = readl(addr + (p << 2)); 2161 temp_dq_io_delay2 = readl(addr + (i << 2)); 2162 2163 if (shift_dq + temp_dq_io_delay1 > delay_max) 2164 shift_dq = delay_max - temp_dq_io_delay2; 2165 else if (shift_dq + temp_dq_io_delay1 < 0) 2166 shift_dq = -temp_dq_io_delay1; 2167 2168 debug_cond(DLEVEL == 2, 2169 "vfifo_center: after: shift_dq[%u]=%d\n", 2170 i, shift_dq); 2171 2172 if (write) 2173 scc_mgr_set_dq_out1_delay(i, temp_dq_io_delay1 + shift_dq); 2174 else 2175 scc_mgr_set_dq_in_delay(p, temp_dq_io_delay1 + shift_dq); 2176 2177 scc_mgr_load_dq(p); 2178 2179 debug_cond(DLEVEL == 2, 2180 "vfifo_center: margin[%u]=[%d,%d]\n", i, 2181 left_edge[i] - shift_dq + (-mid_min), 2182 right_edge[i] + shift_dq - (-mid_min)); 2183 2184 /* To determine values for export structures */ 2185 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin) 2186 *dq_margin = left_edge[i] - shift_dq + (-mid_min); 2187 2188 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin) 2189 *dqs_margin = right_edge[i] + shift_dq - (-mid_min); 2190 } 2191 2192 } 2193 2194 /* per-bit deskew DQ and center */ 2195 static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, 2196 const u32 rw_group, const u32 test_bgn, 2197 const int use_read_test, const int update_fom) 2198 { 2199 const u32 addr = 2200 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET + 2201 (rw_group << 2); 2202 /* 2203 * Store these as signed since there are comparisons with 2204 * signed numbers. 
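	 * For example, while an edge is still unresolved, search_right_edge()
	 * can hold a provisional left_edge[i] of -(d + 1), which compares
	 * correctly against values in 0..delay_max only as a signed number.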
	 */
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, final_dqs_en;
	int32_t dq_margin, dqs_margin;
	int i, min_index;
	int ret;

	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);

	start_dqs = readl(addr);
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr - IO_DQS_EN_DELAY_OFFSET);

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
			 &sticky_bit_chk,
			 left_edge, right_edge, use_read_test);

	/* Search for the right edge of the window for each bit */
	ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
				start_dqs, start_dqs_en,
				&sticky_bit_chk,
				left_edge, right_edge, use_read_test);
	if (ret) {
		/*
		 * Restore delay chain settings before letting the loop
		 * in rw_mgr_mem_calibrate_vfifo retry different
		 * dqs/ck relationships.
		 */
		scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
			scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);

		scc_mgr_load_dqs(rw_group);
		writel(0, &sdr_scc_mgr->update);

		/* search_right_edge() returns (failing bit index + 1). */
		i = ret - 1;
		debug_cond(DLEVEL == 1,
			   "%s:%d vfifo_center: failed to find edge [%u]: %d %d\n",
			   __func__, __LINE__, i, left_edge[i], right_edge[i]);
		if (use_read_test) {
			set_failing_group_stage(rw_group *
				RW_MGR_MEM_DQ_PER_READ_DQS + i,
				CAL_STAGE_VFIFO,
				CAL_SUBSTAGE_VFIFO_CENTER);
		} else {
			set_failing_group_stage(rw_group *
				RW_MGR_MEM_DQ_PER_READ_DQS + i,
				CAL_STAGE_VFIFO_AFTER_WRITES,
				CAL_SUBSTAGE_VFIFO_CENTER);
		}
		return 0;
	}

	min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > IO_DQS_IN_DELAY_MAX)
		new_dqs = IO_DQS_IN_DELAY_MAX;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;

	debug_cond(DLEVEL == 1,
		   "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
		   start_dqs,
		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
		   new_dqs, mid_min);

	/* Add delay to bring centre of all DQ windows to the same "level".
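	 * As a hypothetical worked example: if bit i measured a window of
	 * [right = 2, left = 10] and the narrowest bit (min_index) measured
	 * [right = 4, left = 6], center_dq_windows() below shifts bit i by
	 * ((10 - 2) - (6 - 4)) / 2 = 3 extra taps (plus any
	 * (orig_mid_min - mid_min) correction), so the per-bit window
	 * centres line up before DQS itself is moved.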
	 */
	center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
			  min_index, test_bgn, &dq_margin, &dqs_margin);

	/* Move DQS-en */
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		final_dqs_en = start_dqs_en - mid_min;
		scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
		scc_mgr_load_dqs(rw_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
	scc_mgr_load_dqs(rw_group);
	debug_cond(DLEVEL == 2,
		   "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d\n",
		   __func__, __LINE__, dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	return (dq_margin >= 0) && (dqs_margin >= 0);
}

/**
 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
 * @rw_group: Read/Write Group
 * @phase: DQ/DQS phase
 *
 * Because initially no communication can be reliably performed with the
 * memory device, the sequencer uses a guaranteed write mechanism to write
 * data into the memory device.
 */
static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
						 const u32 phase)
{
	int ret;

	/* Set a particular DQ/DQS phase. */
	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);

	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
		   __func__, __LINE__, rw_group, phase);

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
	 * Load up the patterns used by read calibration using the
	 * current DQDQS phase.
	 */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);

	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
		return 0;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
	 * Back-to-Back reads of the patterns used for calibration.
	 */
	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
	if (ret)
		debug_cond(DLEVEL == 1,
			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
			   __func__, __LINE__, rw_group, phase);
	return ret;
}

/**
 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
 * @rw_group: Read/Write Group
 * @test_bgn: Rank at which the test begins
 *
 * DQS enable calibration ensures reliable capture of the DQ signal without
 * glitches on the DQS line.
 */
static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
						       const u32 test_bgn)
{
	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
	 * DQS and DQS Enable Signal Relationships.
	 */

	/* We start at zero, so have one less dq to divide among */
	const u32 delay_step = IO_IO_IN_DELAY_MAX /
			       (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
	int ret;
	u32 i, p, d, r;

	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);

	/* Try different dq_in_delays since the DQ path is shorter than DQS.
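	 * As an illustrative example (the values are hypothetical): with
	 * IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8,
	 * delay_step = 31 / 7 = 4, so the loop below programs the DQ pins
	 * of the group with staggered input delays 0, 4, 8, ..., 28.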
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0;
		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1,
				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
				   __func__, __LINE__, rw_group, r, i, p, d);

			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}

		writel(0, &sdr_scc_mgr->update);
	}

	/*
	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
	 * dq_in_delay values
	 */
	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);

	debug_cond(DLEVEL == 1,
		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
		   __func__, __LINE__, rw_group, !ret);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
		writel(0, &sdr_scc_mgr->update);
	}

	return ret;
}

/**
 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
 * @rw_group: Read/Write Group
 * @test_bgn: Rank at which the test begins
 * @use_read_test: Perform a read test
 * @update_fom: Update FOM
 *
 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
 * within a group.
 */
static int
rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
				      const int use_read_test,
				      const int update_fom)
{
	int ret, grp_calibrated;
	u32 rank_bgn, sr;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
	 * Read per-bit deskew can be done on a per shadow register basis.
	 */
	grp_calibrated = 1;
	for (rank_bgn = 0, sr = 0;
	     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
		/* Check if this set of ranks should be skipped entirely. */
		if (param->skip_shadow_regs[sr])
			continue;

		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
							test_bgn,
							use_read_test,
							update_fom);
		if (ret)
			continue;

		grp_calibrated = 0;
	}

	if (!grp_calibrated)
		return -EIO;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
 * @rw_group: Read/Write Group
 * @test_bgn: Rank at which the test begins
 *
 * Stage 1: Calibrate the read valid prediction FIFO.
 *
 * This function implements UniPHY calibration Stage 1, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 *
 * - read valid prediction will consist of finding:
 *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
 *   - DQS input phase and DQS input delay (DQ/DQS Centering)
 * - we also do a per-bit deskew on the DQ lines.
 */
static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
{
	uint32_t p, d;
	uint32_t dtaps_per_ptap;
	uint32_t failed_substage;
	int ret;

	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);

	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	/* USER Determine number of delay taps for each phase tap.
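	 * As a hypothetical worked example: with IO_DELAY_PER_OPA_TAP =
	 * 416 ps and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps,
	 * dtaps_per_ptap = DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16, and
	 * the sweep below steps d by 2 through 0..16.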
*/ 2507 dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, 2508 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1; 2509 2510 for (d = 0; d <= dtaps_per_ptap; d += 2) { 2511 /* 2512 * In RLDRAMX we may be messing the delay of pins in 2513 * the same write rw_group but outside of the current read 2514 * the rw_group, but that's ok because we haven't calibrated 2515 * output side yet. 2516 */ 2517 if (d > 0) { 2518 scc_mgr_apply_group_all_out_delay_add_all_ranks( 2519 rw_group, d); 2520 } 2521 2522 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) { 2523 /* 1) Guaranteed Write */ 2524 ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p); 2525 if (ret) 2526 break; 2527 2528 /* 2) DQS Enable Calibration */ 2529 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group, 2530 test_bgn); 2531 if (ret) { 2532 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; 2533 continue; 2534 } 2535 2536 /* 3) Centering DQ/DQS */ 2537 /* 2538 * If doing read after write calibration, do not update 2539 * FOM now. Do it then. 2540 */ 2541 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, 2542 test_bgn, 1, 0); 2543 if (ret) { 2544 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER; 2545 continue; 2546 } 2547 2548 /* All done. */ 2549 goto cal_done_ok; 2550 } 2551 } 2552 2553 /* Calibration Stage 1 failed. */ 2554 set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage); 2555 return 0; 2556 2557 /* Calibration Stage 1 completed OK. */ 2558 cal_done_ok: 2559 /* 2560 * Reset the delay chains back to zero if they have moved > 1 2561 * (check for > 1 because loop will increase d even when pass in 2562 * first case). 2563 */ 2564 if (d > 2) 2565 scc_mgr_zero_group(rw_group, 1); 2566 2567 return 1; 2568 } 2569 2570 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */ 2571 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group, 2572 uint32_t test_bgn) 2573 { 2574 uint32_t rank_bgn, sr; 2575 uint32_t grp_calibrated; 2576 uint32_t write_group; 2577 2578 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn); 2579 2580 /* update info for sims */ 2581 2582 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); 2583 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); 2584 2585 write_group = read_group; 2586 2587 /* update info for sims */ 2588 reg_file_set_group(read_group); 2589 2590 grp_calibrated = 1; 2591 /* Read per-bit deskew can be done on a per shadow register basis */ 2592 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 2593 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { 2594 /* Determine if this set of ranks should be skipped entirely */ 2595 if (!param->skip_shadow_regs[sr]) { 2596 /* This is the last calibration round, update FOM here */ 2597 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn, 2598 read_group, 2599 test_bgn, 0, 2600 1)) { 2601 grp_calibrated = 0; 2602 } 2603 } 2604 } 2605 2606 2607 if (grp_calibrated == 0) { 2608 set_failing_group_stage(write_group, 2609 CAL_STAGE_VFIFO_AFTER_WRITES, 2610 CAL_SUBSTAGE_VFIFO_CENTER); 2611 return 0; 2612 } 2613 2614 return 1; 2615 } 2616 2617 /* Calibrate LFIFO to find smallest read latency */ 2618 static uint32_t rw_mgr_mem_calibrate_lfifo(void) 2619 { 2620 uint32_t found_one; 2621 2622 debug("%s:%d\n", __func__, __LINE__); 2623 2624 /* update info for sims */ 2625 reg_file_set_stage(CAL_STAGE_LFIFO); 2626 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); 2627 2628 /* Load up the patterns used by read calibration for all ranks */ 2629 rw_mgr_mem_calibrate_read_load_patterns(0, 1); 2630 found_one = 0; 2631 2632 do { 2633 
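		/*
		 * Each pass programs the candidate read latency and re-runs
		 * the read test on all ranks; the first failing latency ends
		 * the search, and the smallest passing value is restored
		 * below with a fudge factor of 2 added.
		 */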
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2634 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u", 2635 __func__, __LINE__, gbl->curr_read_lat); 2636 2637 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, 2638 NUM_READ_TESTS, 2639 PASS_ALL_BITS, 2640 1)) { 2641 break; 2642 } 2643 2644 found_one = 1; 2645 /* reduce read latency and see if things are working */ 2646 /* correctly */ 2647 gbl->curr_read_lat--; 2648 } while (gbl->curr_read_lat > 0); 2649 2650 /* reset the fifos to get pointers to known state */ 2651 2652 writel(0, &phy_mgr_cmd->fifo_reset); 2653 2654 if (found_one) { 2655 /* add a fudge factor to the read latency that was determined */ 2656 gbl->curr_read_lat += 2; 2657 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 2658 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \ 2659 read_lat=%u\n", __func__, __LINE__, 2660 gbl->curr_read_lat); 2661 return 1; 2662 } else { 2663 set_failing_group_stage(0xff, CAL_STAGE_LFIFO, 2664 CAL_SUBSTAGE_READ_LATENCY); 2665 2666 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \ 2667 read_lat=%u\n", __func__, __LINE__, 2668 gbl->curr_read_lat); 2669 return 0; 2670 } 2671 } 2672 2673 /* 2674 * issue write test command. 2675 * two variants are provided. one that just tests a write pattern and 2676 * another that tests datamask functionality. 2677 */ 2678 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group, 2679 uint32_t test_dm) 2680 { 2681 uint32_t mcc_instruction; 2682 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) && 2683 ENABLE_SUPER_QUICK_CALIBRATION); 2684 uint32_t rw_wl_nop_cycles; 2685 uint32_t addr; 2686 2687 /* 2688 * Set counter and jump addresses for the right 2689 * number of NOP cycles. 2690 * The number of supported NOP cycles can range from -1 to infinity 2691 * Three different cases are handled: 2692 * 2693 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping 2694 * mechanism will be used to insert the right number of NOPs 2695 * 2696 * 2. For a number of NOP cycles equals to 0, the micro-instruction 2697 * issuing the write command will jump straight to the 2698 * micro-instruction that turns on DQS (for DDRx), or outputs write 2699 * data (for RLD), skipping 2700 * the NOP micro-instruction all together 2701 * 2702 * 3. A number of NOP cycles equal to -1 indicates that DQS must be 2703 * turned on in the same micro-instruction that issues the write 2704 * command. Then we need 2705 * to directly jump to the micro-instruction that sends out the data 2706 * 2707 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters 2708 * (2 and 3). One jump-counter (0) is used to perform multiple 2709 * write-read operations. 2710 * one counter left to issue this command in "multiple-group" mode 2711 */ 2712 2713 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; 2714 2715 if (rw_wl_nop_cycles == -1) { 2716 /* 2717 * CNTR 2 - We want to execute the special write operation that 2718 * turns on DQS right away and then skip directly to the 2719 * instruction that sends out the data. We set the counter to a 2720 * large number so that the jump is always taken. 
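		 * (An interpretive note: 0xFF below is simply a "large"
		 * count, keeping the jump wired to counter 2 taken for more
		 * iterations than this micro-program ever runs, which is
		 * what "always taken" relies on.)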
2721 */ 2722 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2723 2724 /* CNTR 3 - Not used */ 2725 if (test_dm) { 2726 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1; 2727 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA, 2728 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2729 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2730 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2731 } else { 2732 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1; 2733 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA, 2734 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2735 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2736 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2737 } 2738 } else if (rw_wl_nop_cycles == 0) { 2739 /* 2740 * CNTR 2 - We want to skip the NOP operation and go straight 2741 * to the DQS enable instruction. We set the counter to a large 2742 * number so that the jump is always taken. 2743 */ 2744 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); 2745 2746 /* CNTR 3 - Not used */ 2747 if (test_dm) { 2748 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2749 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS, 2750 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2751 } else { 2752 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2753 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS, 2754 &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2755 } 2756 } else { 2757 /* 2758 * CNTR 2 - In this case we want to execute the next instruction 2759 * and NOT take the jump. So we set the counter to 0. The jump 2760 * address doesn't count. 2761 */ 2762 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2); 2763 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2); 2764 2765 /* 2766 * CNTR 3 - Set the nop counter to the number of cycles we 2767 * need to loop for, minus 1. 2768 */ 2769 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); 2770 if (test_dm) { 2771 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0; 2772 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, 2773 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2774 } else { 2775 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0; 2776 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP, 2777 &sdr_rw_load_jump_mgr_regs->load_jump_add3); 2778 } 2779 } 2780 2781 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | 2782 RW_MGR_RESET_READ_DATAPATH_OFFSET); 2783 2784 if (quick_write_mode) 2785 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0); 2786 else 2787 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0); 2788 2789 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0); 2790 2791 /* 2792 * CNTR 1 - This is used to ensure enough time elapses 2793 * for read data to come back. 2794 */ 2795 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); 2796 2797 if (test_dm) { 2798 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT, 2799 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2800 } else { 2801 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT, 2802 &sdr_rw_load_jump_mgr_regs->load_jump_add1); 2803 } 2804 2805 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; 2806 writel(mcc_instruction, addr + (group << 2)); 2807 } 2808 2809 /* Test writes, can check for a single bit pass or multiple bit pass */ 2810 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, 2811 uint32_t write_group, uint32_t use_dm, uint32_t all_correct, 2812 uint32_t *bit_chk, uint32_t all_ranks) 2813 { 2814 uint32_t r; 2815 uint32_t correct_mask_vg; 2816 uint32_t tmp_bit_chk; 2817 uint32_t vg; 2818 uint32_t rank_end = all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : 2819 (rank_bgn + NUM_RANKS_PER_SHADOW_REG); 2820 uint32_t addr_rw_mgr; 2821 uint32_t base_rw_mgr; 2822 2823 *bit_chk = param->write_correct_mask; 2824 correct_mask_vg = param->write_correct_mask_vg; 2825 2826 for (r = rank_bgn; r < rank_end; r++) { 2827 if (param->skip_ranks[r]) { 2828 /* request to skip the rank */ 2829 continue; 2830 } 2831 2832 /* set rank */ 2833 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); 2834 2835 tmp_bit_chk = 0; 2836 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS; 2837 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) { 2838 /* reset the fifos to get pointers to known state */ 2839 writel(0, &phy_mgr_cmd->fifo_reset); 2840 2841 tmp_bit_chk = tmp_bit_chk << 2842 (RW_MGR_MEM_DQ_PER_WRITE_DQS / 2843 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 2844 rw_mgr_mem_calibrate_write_test_issue(write_group * 2845 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg, 2846 use_dm); 2847 2848 base_rw_mgr = readl(addr_rw_mgr); 2849 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr)); 2850 if (vg == 0) 2851 break; 2852 } 2853 *bit_chk &= tmp_bit_chk; 2854 } 2855 2856 if (all_correct) { 2857 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2858 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \ 2859 %u => %lu", write_group, use_dm, 2860 *bit_chk, param->write_correct_mask, 2861 (long unsigned int)(*bit_chk == 2862 param->write_correct_mask)); 2863 return *bit_chk == param->write_correct_mask; 2864 } else { 2865 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); 2866 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ", 2867 write_group, use_dm, *bit_chk); 2868 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0, 2869 (long unsigned int)(*bit_chk != 0)); 2870 return *bit_chk != 0x00; 2871 } 2872 } 2873 2874 /* 2875 * center all windows. do per-bit-deskew to possibly increase size of 2876 * certain windows. 2877 */ 2878 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, 2879 uint32_t write_group, uint32_t test_bgn) 2880 { 2881 uint32_t i, min_index; 2882 int32_t d; 2883 /* 2884 * Store these as signed since there are comparisons with 2885 * signed numbers. 2886 */ 2887 uint32_t bit_chk; 2888 uint32_t sticky_bit_chk; 2889 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2890 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; 2891 int32_t mid; 2892 int32_t mid_min, orig_mid_min; 2893 int32_t new_dqs, start_dqs; 2894 int32_t dq_margin, dqs_margin, dm_margin; 2895 uint32_t addr; 2896 2897 int ret; 2898 2899 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn); 2900 2901 dm_margin = 0; 2902 2903 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET; 2904 start_dqs = readl(addr + 2905 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2)); 2906 2907 /* per-bit deskew */ 2908 2909 /* 2910 * set the left and right edge of each bit to an illegal value 2911 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value. 
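	 * A real edge always lands in 0..IO_IO_OUT1_DELAY_MAX (or is stored
	 * negated while provisional), so delay_max + 1 can never result from
	 * a measurement and reliably encodes "edge not found yet".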
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
			 &sticky_bit_chk,
			 left_edge, right_edge, 0);

	/* Search for the right edge of the window for each bit */
	ret = search_right_edge(1, rank_bgn, write_group, 0,
				start_dqs, 0,
				&sticky_bit_chk,
				left_edge, right_edge, 0);
	if (ret) {
		set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs;
	mid_min = 0;
	debug_cond(DLEVEL == 1,
		   "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
		   __func__, __LINE__, start_dqs, new_dqs, mid_min);

	/* Add delay to bring centre of all DQ windows to the same "level". */
	center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min,
			  min_index, 0, &dq_margin, &dqs_margin);

	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);

	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * Set the left and right edge of each bit to an illegal value;
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as the illegal value.
	 */
	left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t win_best = 0;

	/* Search for the window, or part of it, with the DM delay shift */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		scc_mgr_apply_group_dm_out1_delay(d);
		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USER Set current end of the window */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been seen,
			 * this is our current start of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;

			/*
			 * If the current window is bigger than the best seen,
			 * set the best seen to be the current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}

	/* Reset DM delay chains to 0 */
	scc_mgr_apply_group_dm_out1_delay(0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so, we need to continue the search by shifting DQS; otherwise
	 * the DQS search begins as a new search.
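	 * Hypothetical example: if the DM sweep above passed only for
	 * d = 7..3, the window was recorded as bgn_curr = -7, end_curr = -3;
	 * it does not touch 0, so it cannot extend into the DQS-shift range
	 * swept next, and the running edges are reset here.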
	 */
	if (end_curr != 0) {
		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the window, or part of it, with DQS shifts */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: This only shifts DQS, so we may be limiting ourselves
		 * to the width of DQ unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USER Set current end of the window */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been seen,
			 * this is our current begin of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If the current window is bigger than the best seen,
			 * set the best seen to be the current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window
			 * already seen, we can exit.
			 */
			if ((win_best - 1) >
			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
				break;
			}
		}
	}

	/* Assign left and right edge for cal and reporting. */
	left_edge[0] = -1 * bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n",
		   __func__, __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig) */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* Only move right, since we are not moving DQS/DQ */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2,
		   "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
		   __func__, __LINE__, left_edge[0], right_edge[0], mid,
		   dm_margin);

	/* Export values */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2,
		   "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
		   __func__, __LINE__, dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}

/**
 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
 * @rank_bgn: Rank number
 * @group: Read/Write Group
 * @test_bgn: Rank at which the test begins
 *
 * Stage 2: Write Calibration Part One.
 *
 * This function implements UniPHY calibration Stage 2, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 */
static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
				       const u32 test_bgn)
{
	int ret;

	debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);

	/* Update info for sims */
	reg_file_set_group(group);
	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
	if (!ret) {
		set_failing_group_stage(group, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return -EIO;
	}

	return 0;
}

/**
 * mem_precharge_and_activate() - Precharge all banks and activate
 *
 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
 */
static void mem_precharge_and_activate(void)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		/* Test if the rank should be skipped. */
		if (param->skip_ranks[r])
			continue;

		/* Set rank. */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Activate rows. */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}

/**
 * mem_init_latency() - Configure memory RLAT and WLAT settings
 *
 * Configure memory RLAT and WLAT parameters.
 */
static void mem_init_latency(void)
{
	/*
	 * For AV/CV, LFIFO is hardened and always runs at full rate
	 * so max latency in AFI clocks, used here, is correspondingly
	 * smaller.
	 */
	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
	u32 rlat, wlat;

	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Read in write latency.
	 * WL for Hard PHY does not include additive latency.
	 */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	gbl->rw_wl_nop_cycles = wlat - 1;

	/* Read in read latency. */
	rlat = readl(&data_mgr->t_rl_add);

	/* Set a pretty high read latency initially. */
	gbl->curr_read_lat = rlat + 16;
	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	writel(wlat, &phy_mgr_cfg->afi_wlat);
}

/**
 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
 *
 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
 */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
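		 * For instance, with IO_DLL_CHAIN_LENGTH = 8, the Case:33398
		 * derivation below gives (720 - 90 - 180 - 2 * 45) / 45 =
		 * 1.25 * 8 - 2 = 8 ptaps of DQS output phase.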
3230 */ 3231 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3232 scc_mgr_set_dqs_en_phase(i, 0); 3233 #if IO_DLL_CHAIN_LENGTH == 6 3234 scc_mgr_set_dqdqs_output_phase(i, 6); 3235 #else 3236 scc_mgr_set_dqdqs_output_phase(i, 7); 3237 #endif 3238 /* 3239 * Case:33398 3240 * 3241 * Write data arrives to the I/O two cycles before write 3242 * latency is reached (720 deg). 3243 * -> due to bit-slip in a/c bus 3244 * -> to allow board skew where dqs is longer than ck 3245 * -> how often can this happen!? 3246 * -> can claim back some ptaps for high freq 3247 * support if we can relax this, but i digress... 3248 * 3249 * The write_clk leads mem_ck by 90 deg 3250 * The minimum ptap of the OPA is 180 deg 3251 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay 3252 * The write_clk is always delayed by 2 ptaps 3253 * 3254 * Hence, to make DQS aligned to CK, we need to delay 3255 * DQS by: 3256 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH)) 3257 * 3258 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH) 3259 * gives us the number of ptaps, which simplies to: 3260 * 3261 * (1.25 * IO_DLL_CHAIN_LENGTH - 2) 3262 */ 3263 scc_mgr_set_dqdqs_output_phase(i, 3264 1.25 * IO_DLL_CHAIN_LENGTH - 2); 3265 } 3266 writel(0xff, &sdr_scc_mgr->dqs_ena); 3267 writel(0xff, &sdr_scc_mgr->dqs_io_ena); 3268 3269 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { 3270 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3271 SCC_MGR_GROUP_COUNTER_OFFSET); 3272 } 3273 writel(0xff, &sdr_scc_mgr->dq_ena); 3274 writel(0xff, &sdr_scc_mgr->dm_ena); 3275 writel(0, &sdr_scc_mgr->update); 3276 } 3277 3278 /* Compensate for simulation model behaviour */ 3279 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3280 scc_mgr_set_dqs_bus_in_delay(i, 10); 3281 scc_mgr_load_dqs(i); 3282 } 3283 writel(0, &sdr_scc_mgr->update); 3284 3285 /* 3286 * ArriaV has hard FIFOs that can only be initialized by incrementing 3287 * in sequencer. 3288 */ 3289 vfifo_offset = CALIB_VFIFO_OFFSET; 3290 for (j = 0; j < vfifo_offset; j++) 3291 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); 3292 writel(0, &phy_mgr_cmd->fifo_reset); 3293 3294 /* 3295 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal 3296 * setting from generation-time constant. 3297 */ 3298 gbl->curr_read_lat = CALIB_LFIFO_OFFSET; 3299 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); 3300 } 3301 3302 /** 3303 * mem_calibrate() - Memory calibration entry point. 3304 * 3305 * Perform memory calibration. 3306 */ 3307 static uint32_t mem_calibrate(void) 3308 { 3309 uint32_t i; 3310 uint32_t rank_bgn, sr; 3311 uint32_t write_group, write_test_bgn; 3312 uint32_t read_group, read_test_bgn; 3313 uint32_t run_groups, current_run; 3314 uint32_t failing_groups = 0; 3315 uint32_t group_failed = 0; 3316 3317 const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH / 3318 RW_MGR_MEM_IF_WRITE_DQS_WIDTH; 3319 3320 debug("%s:%d\n", __func__, __LINE__); 3321 3322 /* Initialize the data settings */ 3323 gbl->error_substage = CAL_SUBSTAGE_NIL; 3324 gbl->error_stage = CAL_STAGE_NIL; 3325 gbl->error_group = 0xff; 3326 gbl->fom_in = 0; 3327 gbl->fom_out = 0; 3328 3329 /* Initialize WLAT and RLAT. */ 3330 mem_init_latency(); 3331 3332 /* Initialize bit slips. */ 3333 mem_precharge_and_activate(); 3334 3335 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { 3336 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | 3337 SCC_MGR_GROUP_COUNTER_OFFSET); 3338 /* Only needed once to set all groups, pins, DQ, DQS, DM. 
*/ 3339 if (i == 0) 3340 scc_mgr_set_hhp_extras(); 3341 3342 scc_set_bypass_mode(i); 3343 } 3344 3345 /* Calibration is skipped. */ 3346 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { 3347 /* 3348 * Set VFIFO and LFIFO to instant-on settings in skip 3349 * calibration mode. 3350 */ 3351 mem_skip_calibrate(); 3352 3353 /* 3354 * Do not remove this line as it makes sure all of our 3355 * decisions have been applied. 3356 */ 3357 writel(0, &sdr_scc_mgr->update); 3358 return 1; 3359 } 3360 3361 /* Calibration is not skipped. */ 3362 for (i = 0; i < NUM_CALIB_REPEAT; i++) { 3363 /* 3364 * Zero all delay chain/phase settings for all 3365 * groups and all shadow register sets. 3366 */ 3367 scc_mgr_zero_all(); 3368 3369 run_groups = ~param->skip_groups; 3370 3371 for (write_group = 0, write_test_bgn = 0; write_group 3372 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++, 3373 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) { 3374 3375 /* Initialize the group failure */ 3376 group_failed = 0; 3377 3378 current_run = run_groups & ((1 << 3379 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); 3380 run_groups = run_groups >> 3381 RW_MGR_NUM_DQS_PER_WRITE_GROUP; 3382 3383 if (current_run == 0) 3384 continue; 3385 3386 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | 3387 SCC_MGR_GROUP_COUNTER_OFFSET); 3388 scc_mgr_zero_group(write_group, 0); 3389 3390 for (read_group = write_group * rwdqs_ratio, 3391 read_test_bgn = 0; 3392 read_group < (write_group + 1) * rwdqs_ratio; 3393 read_group++, 3394 read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) { 3395 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO) 3396 continue; 3397 3398 /* Calibrate the VFIFO */ 3399 if (rw_mgr_mem_calibrate_vfifo(read_group, 3400 read_test_bgn)) 3401 continue; 3402 3403 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS)) 3404 return 0; 3405 3406 /* The group failed, we're done. */ 3407 goto grp_failed; 3408 } 3409 3410 /* Calibrate the output side */ 3411 for (rank_bgn = 0, sr = 0; 3412 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; 3413 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) { 3414 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) 3415 continue; 3416 3417 /* Not needed in quick mode! */ 3418 if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) 3419 continue; 3420 3421 /* 3422 * Determine if this set of ranks 3423 * should be skipped entirely. 3424 */ 3425 if (param->skip_shadow_regs[sr]) 3426 continue; 3427 3428 /* Calibrate WRITEs */ 3429 if (!rw_mgr_mem_calibrate_writes(rank_bgn, 3430 write_group, write_test_bgn)) 3431 continue; 3432 3433 group_failed = 1; 3434 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS)) 3435 return 0; 3436 } 3437 3438 /* Some group failed, we're done. */ 3439 if (group_failed) 3440 goto grp_failed; 3441 3442 for (read_group = write_group * rwdqs_ratio, 3443 read_test_bgn = 0; 3444 read_group < (write_group + 1) * rwdqs_ratio; 3445 read_group++, 3446 read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) { 3447 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) 3448 continue; 3449 3450 if (rw_mgr_mem_calibrate_vfifo_end(read_group, 3451 read_test_bgn)) 3452 continue; 3453 3454 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS)) 3455 return 0; 3456 3457 /* The group failed, we're done. */ 3458 goto grp_failed; 3459 } 3460 3461 /* No group failed, continue as usual. */ 3462 continue; 3463 3464 grp_failed: /* A group failed, increment the counter. */ 3465 failing_groups++; 3466 } 3467 3468 /* 3469 * USER If there are any failing groups then report 3470 * the failure. 
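		 * (failing_groups counts the write groups that took the
		 * grp_failed path in this NUM_CALIB_REPEAT iteration; any
		 * non-zero count aborts calibration with a 0 return below.)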
3471 */ 3472 if (failing_groups != 0) 3473 return 0; 3474 3475 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO) 3476 continue; 3477 3478 /* 3479 * If we're skipping groups as part of debug, 3480 * don't calibrate LFIFO. 3481 */ 3482 if (param->skip_groups != 0) 3483 continue; 3484 3485 /* Calibrate the LFIFO */ 3486 if (!rw_mgr_mem_calibrate_lfifo()) 3487 return 0; 3488 } 3489 3490 /* 3491 * Do not remove this line as it makes sure all of our decisions 3492 * have been applied. 3493 */ 3494 writel(0, &sdr_scc_mgr->update); 3495 return 1; 3496 } 3497 3498 /** 3499 * run_mem_calibrate() - Perform memory calibration 3500 * 3501 * This function triggers the entire memory calibration procedure. 3502 */ 3503 static int run_mem_calibrate(void) 3504 { 3505 int pass; 3506 3507 debug("%s:%d\n", __func__, __LINE__); 3508 3509 /* Reset pass/fail status shown on afi_cal_success/fail */ 3510 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status); 3511 3512 /* Stop tracking manager. */ 3513 clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22); 3514 3515 phy_mgr_initialize(); 3516 rw_mgr_mem_initialize(); 3517 3518 /* Perform the actual memory calibration. */ 3519 pass = mem_calibrate(); 3520 3521 mem_precharge_and_activate(); 3522 writel(0, &phy_mgr_cmd->fifo_reset); 3523 3524 /* Handoff. */ 3525 rw_mgr_mem_handoff(); 3526 /* 3527 * In Hard PHY this is a 2-bit control: 3528 * 0: AFI Mux Select 3529 * 1: DDIO Mux Select 3530 */ 3531 writel(0x2, &phy_mgr_cfg->mux_sel); 3532 3533 /* Start tracking manager. */ 3534 setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22); 3535 3536 return pass; 3537 } 3538 3539 /** 3540 * debug_mem_calibrate() - Report result of memory calibration 3541 * @pass: Value indicating whether calibration passed or failed 3542 * 3543 * This function reports the results of the memory calibration 3544 * and writes debug information into the register file. 3545 */ 3546 static void debug_mem_calibrate(int pass) 3547 { 3548 uint32_t debug_info; 3549 3550 if (pass) { 3551 printf("%s: CALIBRATION PASSED\n", __FILE__); 3552 3553 gbl->fom_in /= 2; 3554 gbl->fom_out /= 2; 3555 3556 if (gbl->fom_in > 0xff) 3557 gbl->fom_in = 0xff; 3558 3559 if (gbl->fom_out > 0xff) 3560 gbl->fom_out = 0xff; 3561 3562 /* Update the FOM in the register file */ 3563 debug_info = gbl->fom_in; 3564 debug_info |= gbl->fom_out << 8; 3565 writel(debug_info, &sdr_reg_file->fom); 3566 3567 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3568 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status); 3569 } else { 3570 printf("%s: CALIBRATION FAILED\n", __FILE__); 3571 3572 debug_info = gbl->error_stage; 3573 debug_info |= gbl->error_substage << 8; 3574 debug_info |= gbl->error_group << 16; 3575 3576 writel(debug_info, &sdr_reg_file->failing_stage); 3577 writel(debug_info, &phy_mgr_cfg->cal_debug_info); 3578 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status); 3579 3580 /* Update the failing group/stage in the register file */ 3581 debug_info = gbl->error_stage; 3582 debug_info |= gbl->error_substage << 8; 3583 debug_info |= gbl->error_group << 16; 3584 writel(debug_info, &sdr_reg_file->failing_stage); 3585 } 3586 3587 printf("%s: Calibration complete\n", __FILE__); 3588 } 3589 3590 /** 3591 * hc_initialize_rom_data() - Initialize ROM data 3592 * 3593 * Initialize ROM data. 
3594 */ 3595 static void hc_initialize_rom_data(void) 3596 { 3597 u32 i, addr; 3598 3599 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET; 3600 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++) 3601 writel(inst_rom_init[i], addr + (i << 2)); 3602 3603 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET; 3604 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++) 3605 writel(ac_rom_init[i], addr + (i << 2)); 3606 } 3607 3608 /** 3609 * initialize_reg_file() - Initialize SDR register file 3610 * 3611 * Initialize SDR register file. 3612 */ 3613 static void initialize_reg_file(void) 3614 { 3615 /* Initialize the register file with the correct data */ 3616 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature); 3617 writel(0, &sdr_reg_file->debug_data_addr); 3618 writel(0, &sdr_reg_file->cur_stage); 3619 writel(0, &sdr_reg_file->fom); 3620 writel(0, &sdr_reg_file->failing_stage); 3621 writel(0, &sdr_reg_file->debug1); 3622 writel(0, &sdr_reg_file->debug2); 3623 } 3624 3625 /** 3626 * initialize_hps_phy() - Initialize HPS PHY 3627 * 3628 * Initialize HPS PHY. 3629 */ 3630 static void initialize_hps_phy(void) 3631 { 3632 uint32_t reg; 3633 /* 3634 * Tracking also gets configured here because it's in the 3635 * same register. 3636 */ 3637 uint32_t trk_sample_count = 7500; 3638 uint32_t trk_long_idle_sample_count = (10 << 16) | 100; 3639 /* 3640 * Format is number of outer loops in the 16 MSB, sample 3641 * count in 16 LSB. 3642 */ 3643 3644 reg = 0; 3645 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); 3646 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); 3647 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); 3648 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); 3649 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); 3650 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); 3651 /* 3652 * This field selects the intrinsic latency to RDATA_EN/FULL path. 3653 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 3654 */ 3655 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); 3656 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET( 3657 trk_sample_count); 3658 writel(reg, &sdr_ctrl->phy_ctrl0); 3659 3660 reg = 0; 3661 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET( 3662 trk_sample_count >> 3663 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); 3664 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET( 3665 trk_long_idle_sample_count); 3666 writel(reg, &sdr_ctrl->phy_ctrl1); 3667 3668 reg = 0; 3669 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET( 3670 trk_long_idle_sample_count >> 3671 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); 3672 writel(reg, &sdr_ctrl->phy_ctrl2); 3673 } 3674 3675 /** 3676 * initialize_tracking() - Initialize tracking 3677 * 3678 * Initialize the register file with usable initial data. 3679 */ 3680 static void initialize_tracking(void) 3681 { 3682 /* 3683 * Initialize the register file with the correct data. 3684 * Compute usable version of value in case we skip full 3685 * computation later. 
3686 */ 3687 writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1, 3688 &sdr_reg_file->dtaps_per_ptap); 3689 3690 /* trk_sample_count */ 3691 writel(7500, &sdr_reg_file->trk_sample_count); 3692 3693 /* longidle outer loop [15:0] */ 3694 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle); 3695 3696 /* 3697 * longidle sample count [31:24] 3698 * trfc, worst case of 933Mhz 4Gb [23:16] 3699 * trcd, worst case [15:8] 3700 * vfifo wait [7:0] 3701 */ 3702 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0), 3703 &sdr_reg_file->delays); 3704 3705 /* mux delay */ 3706 writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) | 3707 (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0), 3708 &sdr_reg_file->trk_rw_mgr_addr); 3709 3710 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, 3711 &sdr_reg_file->trk_read_dqs_width); 3712 3713 /* trefi [7:0] */ 3714 writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0), 3715 &sdr_reg_file->trk_rfsh); 3716 } 3717 3718 int sdram_calibration_full(void) 3719 { 3720 struct param_type my_param; 3721 struct gbl_type my_gbl; 3722 uint32_t pass; 3723 3724 memset(&my_param, 0, sizeof(my_param)); 3725 memset(&my_gbl, 0, sizeof(my_gbl)); 3726 3727 param = &my_param; 3728 gbl = &my_gbl; 3729 3730 /* Set the calibration enabled by default */ 3731 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; 3732 /* 3733 * Only sweep all groups (regardless of fail state) by default 3734 * Set enabled read test by default. 3735 */ 3736 #if DISABLE_GUARANTEED_READ 3737 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; 3738 #endif 3739 /* Initialize the register file */ 3740 initialize_reg_file(); 3741 3742 /* Initialize any PHY CSR */ 3743 initialize_hps_phy(); 3744 3745 scc_mgr_initialize(); 3746 3747 initialize_tracking(); 3748 3749 printf("%s: Preparing to start memory calibration\n", __FILE__); 3750 3751 debug("%s:%d\n", __func__, __LINE__); 3752 debug_cond(DLEVEL == 1, 3753 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", 3754 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM, 3755 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS, 3756 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS, 3757 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); 3758 debug_cond(DLEVEL == 1, 3759 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", 3760 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH, 3761 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH, 3762 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP); 3763 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u", 3764 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH); 3765 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", 3766 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX, 3767 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX); 3768 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", 3769 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX, 3770 IO_IO_OUT2_DELAY_MAX); 3771 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", 3772 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE); 3773 3774 hc_initialize_rom_data(); 3775 3776 /* update info for sims */ 3777 reg_file_set_stage(CAL_STAGE_NIL); 3778 reg_file_set_group(0); 3779 3780 /* 3781 * Load global needed for those actions that require 3782 * some dynamic calibration support. 3783 */ 3784 dyn_calib_steps = STATIC_CALIB_STEPS; 3785 /* 3786 * Load global to allow dynamic selection of delay loop settings 3787 * based on calibration mode. 
3788 */ 3789 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) 3790 skip_delay_mask = 0xff; 3791 else 3792 skip_delay_mask = 0x0; 3793 3794 pass = run_mem_calibrate(); 3795 debug_mem_calibrate(pass); 3796 return pass; 3797 } 3798