1 /* 2 * Copyright (C) Marvell International Ltd. and its affiliates 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 */ 6 7 #include <common.h> 8 #include <spl.h> 9 #include <asm/io.h> 10 #include <asm/arch/cpu.h> 11 #include <asm/arch/soc.h> 12 13 #include "ddr3_init.h" 14 15 #define GET_MAX_VALUE(x, y) \ 16 ((x) > (y)) ? (x) : (y) 17 #define CEIL_DIVIDE(x, y) \ 18 ((x - (x / y) * y) == 0) ? ((x / y) - 1) : (x / y) 19 20 #define TIME_2_CLOCK_CYCLES CEIL_DIVIDE 21 22 #define GET_CS_FROM_MASK(mask) (cs_mask2_num[mask]) 23 #define CS_CBE_VALUE(cs_num) (cs_cbe_reg[cs_num]) 24 25 u32 window_mem_addr = 0; 26 u32 phy_reg0_val = 0; 27 u32 phy_reg1_val = 8; 28 u32 phy_reg2_val = 0; 29 u32 phy_reg3_val = 0xa; 30 enum hws_ddr_freq init_freq = DDR_FREQ_667; 31 enum hws_ddr_freq low_freq = DDR_FREQ_LOW_FREQ; 32 enum hws_ddr_freq medium_freq; 33 u32 debug_dunit = 0; 34 u32 odt_additional = 1; 35 u32 *dq_map_table = NULL; 36 u32 odt_config = 1; 37 38 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ALLEYCAT3) || \ 39 defined(CONFIG_ARMADA_39X) 40 u32 is_pll_before_init = 0, is_adll_calib_before_init = 0, is_dfs_in_init = 0; 41 u32 dfs_low_freq = 130; 42 #else 43 u32 is_pll_before_init = 0, is_adll_calib_before_init = 1, is_dfs_in_init = 0; 44 u32 dfs_low_freq = 100; 45 #endif 46 u32 g_rtt_nom_c_s0, g_rtt_nom_c_s1; 47 u8 calibration_update_control; /* 2 external only, 1 is internal only */ 48 49 enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]; 50 enum auto_tune_stage training_stage = INIT_CONTROLLER; 51 u32 finger_test = 0, p_finger_start = 11, p_finger_end = 64, 52 n_finger_start = 11, n_finger_end = 64, 53 p_finger_step = 3, n_finger_step = 3; 54 u32 clamp_tbl[] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }; 55 56 /* Initiate to 0xff, this variable is define by user in debug mode */ 57 u32 mode2_t = 0xff; 58 u32 xsb_validate_type = 0; 59 u32 xsb_validation_base_address = 0xf000; 60 u32 first_active_if = 0; 61 u32 dfs_low_phy1 = 0x1f; 62 u32 multicast_id = 0; 63 int use_broadcast = 0; 64 struct hws_tip_freq_config_info *freq_info_table = NULL; 65 u8 is_cbe_required = 0; 66 u32 debug_mode = 0; 67 u32 delay_enable = 0; 68 int rl_mid_freq_wa = 0; 69 70 u32 effective_cs = 0; 71 72 u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT | 73 WRITE_LEVELING_MASK_BIT | 74 LOAD_PATTERN_2_MASK_BIT | 75 READ_LEVELING_MASK_BIT | 76 SET_TARGET_FREQ_MASK_BIT | WRITE_LEVELING_TF_MASK_BIT | 77 READ_LEVELING_TF_MASK_BIT | 78 CENTRALIZATION_RX_MASK_BIT | CENTRALIZATION_TX_MASK_BIT); 79 80 void ddr3_print_version(void) 81 { 82 printf(DDR3_TIP_VERSION_STRING); 83 } 84 85 static int ddr3_tip_ddr3_training_main_flow(u32 dev_num); 86 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 87 u32 if_id, u32 cl_value, u32 cwl_value); 88 static int ddr3_tip_ddr3_auto_tune(u32 dev_num); 89 static int is_bus_access_done(u32 dev_num, u32 if_id, 90 u32 dunit_reg_adrr, u32 bit); 91 #ifdef ODT_TEST_SUPPORT 92 static int odt_test(u32 dev_num, enum hws_algo_type algo_type); 93 #endif 94 95 int adll_calibration(u32 dev_num, enum hws_access_type access_type, 96 u32 if_id, enum hws_ddr_freq frequency); 97 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 98 u32 if_id, enum hws_ddr_freq frequency); 99 100 static struct page_element page_param[] = { 101 /* 102 * 8bits 16 bits 103 * page-size(K) page-size(K) mask 104 */ 105 { 1, 2, 2}, 106 /* 512M */ 107 { 1, 2, 3}, 108 /* 1G */ 109 { 1, 2, 0}, 110 /* 2G */ 111 { 1, 2, 4}, 112 /* 4G */ 113 { 2, 2, 5} 114 /* 8G */ 115 }; 116 117 static u8 mem_size_config[MEM_SIZE_LAST] = { 118 0x2, /* 512Mbit */ 119 0x3, /* 1Gbit */ 120 0x0, /* 2Gbit */ 121 0x4, /* 4Gbit */ 122 0x5 /* 8Gbit */ 123 }; 124 125 static u8 cs_mask2_num[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 }; 126 127 static struct reg_data odpg_default_value[] = { 128 {0x1034, 0x38000, MASK_ALL_BITS}, 129 {0x1038, 0x0, MASK_ALL_BITS}, 130 {0x10b0, 0x0, MASK_ALL_BITS}, 131 {0x10b8, 0x0, MASK_ALL_BITS}, 132 {0x10c0, 0x0, MASK_ALL_BITS}, 133 {0x10f0, 0x0, MASK_ALL_BITS}, 134 {0x10f4, 0x0, MASK_ALL_BITS}, 135 {0x10f8, 0xff, MASK_ALL_BITS}, 136 {0x10fc, 0xffff, MASK_ALL_BITS}, 137 {0x1130, 0x0, MASK_ALL_BITS}, 138 {0x1830, 0x2000000, MASK_ALL_BITS}, 139 {0x14d0, 0x0, MASK_ALL_BITS}, 140 {0x14d4, 0x0, MASK_ALL_BITS}, 141 {0x14d8, 0x0, MASK_ALL_BITS}, 142 {0x14dc, 0x0, MASK_ALL_BITS}, 143 {0x1454, 0x0, MASK_ALL_BITS}, 144 {0x1594, 0x0, MASK_ALL_BITS}, 145 {0x1598, 0x0, MASK_ALL_BITS}, 146 {0x159c, 0x0, MASK_ALL_BITS}, 147 {0x15a0, 0x0, MASK_ALL_BITS}, 148 {0x15a4, 0x0, MASK_ALL_BITS}, 149 {0x15a8, 0x0, MASK_ALL_BITS}, 150 {0x15ac, 0x0, MASK_ALL_BITS}, 151 {0x1604, 0x0, MASK_ALL_BITS}, 152 {0x1608, 0x0, MASK_ALL_BITS}, 153 {0x160c, 0x0, MASK_ALL_BITS}, 154 {0x1610, 0x0, MASK_ALL_BITS}, 155 {0x1614, 0x0, MASK_ALL_BITS}, 156 {0x1618, 0x0, MASK_ALL_BITS}, 157 {0x1624, 0x0, MASK_ALL_BITS}, 158 {0x1690, 0x0, MASK_ALL_BITS}, 159 {0x1694, 0x0, MASK_ALL_BITS}, 160 {0x1698, 0x0, MASK_ALL_BITS}, 161 {0x169c, 0x0, MASK_ALL_BITS}, 162 {0x14b8, 0x6f67, MASK_ALL_BITS}, 163 {0x1630, 0x0, MASK_ALL_BITS}, 164 {0x1634, 0x0, MASK_ALL_BITS}, 165 {0x1638, 0x0, MASK_ALL_BITS}, 166 {0x163c, 0x0, MASK_ALL_BITS}, 167 {0x16b0, 0x0, MASK_ALL_BITS}, 168 {0x16b4, 0x0, MASK_ALL_BITS}, 169 {0x16b8, 0x0, MASK_ALL_BITS}, 170 {0x16bc, 0x0, MASK_ALL_BITS}, 171 {0x16c0, 0x0, MASK_ALL_BITS}, 172 {0x16c4, 0x0, MASK_ALL_BITS}, 173 {0x16c8, 0x0, MASK_ALL_BITS}, 174 {0x16cc, 0x1, MASK_ALL_BITS}, 175 {0x16f0, 0x1, MASK_ALL_BITS}, 176 {0x16f4, 0x0, MASK_ALL_BITS}, 177 {0x16f8, 0x0, MASK_ALL_BITS}, 178 {0x16fc, 0x0, MASK_ALL_BITS} 179 }; 180 181 static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access, 182 u32 if_id, enum hws_access_type phy_access, 183 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 184 u32 data_value, enum hws_operation oper_type); 185 static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id); 186 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id); 187 188 /* 189 * Update global training parameters by data from user 190 */ 191 int ddr3_tip_tune_training_params(u32 dev_num, 192 struct tune_train_params *params) 193 { 194 if (params->ck_delay != -1) 195 ck_delay = params->ck_delay; 196 if (params->ck_delay_16 != -1) 197 ck_delay_16 = params->ck_delay_16; 198 if (params->phy_reg3_val != -1) 199 phy_reg3_val = params->phy_reg3_val; 200 201 return MV_OK; 202 } 203 204 /* 205 * Configure CS 206 */ 207 int ddr3_tip_configure_cs(u32 dev_num, u32 if_id, u32 cs_num, u32 enable) 208 { 209 u32 data, addr_hi, data_high; 210 u32 mem_index; 211 struct hws_topology_map *tm = ddr3_get_topology_map(); 212 213 if (enable == 1) { 214 data = (tm->interface_params[if_id].bus_width == 215 BUS_WIDTH_8) ? 0 : 1; 216 CHECK_STATUS(ddr3_tip_if_write 217 (dev_num, ACCESS_TYPE_UNICAST, if_id, 218 SDRAM_ACCESS_CONTROL_REG, (data << (cs_num * 4)), 219 0x3 << (cs_num * 4))); 220 mem_index = tm->interface_params[if_id].memory_size; 221 222 addr_hi = mem_size_config[mem_index] & 0x3; 223 CHECK_STATUS(ddr3_tip_if_write 224 (dev_num, ACCESS_TYPE_UNICAST, if_id, 225 SDRAM_ACCESS_CONTROL_REG, 226 (addr_hi << (2 + cs_num * 4)), 227 0x3 << (2 + cs_num * 4))); 228 229 data_high = (mem_size_config[mem_index] & 0x4) >> 2; 230 CHECK_STATUS(ddr3_tip_if_write 231 (dev_num, ACCESS_TYPE_UNICAST, if_id, 232 SDRAM_ACCESS_CONTROL_REG, 233 data_high << (20 + cs_num), 1 << (20 + cs_num))); 234 235 /* Enable Address Select Mode */ 236 CHECK_STATUS(ddr3_tip_if_write 237 (dev_num, ACCESS_TYPE_UNICAST, if_id, 238 SDRAM_ACCESS_CONTROL_REG, 1 << (16 + cs_num), 239 1 << (16 + cs_num))); 240 } 241 switch (cs_num) { 242 case 0: 243 case 1: 244 case 2: 245 CHECK_STATUS(ddr3_tip_if_write 246 (dev_num, ACCESS_TYPE_UNICAST, if_id, 247 DDR_CONTROL_LOW_REG, (enable << (cs_num + 11)), 248 1 << (cs_num + 11))); 249 break; 250 case 3: 251 CHECK_STATUS(ddr3_tip_if_write 252 (dev_num, ACCESS_TYPE_UNICAST, if_id, 253 DDR_CONTROL_LOW_REG, (enable << 15), 1 << 15)); 254 break; 255 } 256 257 return MV_OK; 258 } 259 260 /* 261 * Calculate number of CS 262 */ 263 static int calc_cs_num(u32 dev_num, u32 if_id, u32 *cs_num) 264 { 265 u32 cs; 266 u32 bus_cnt; 267 u32 cs_count; 268 u32 cs_bitmask; 269 u32 curr_cs_num = 0; 270 struct hws_topology_map *tm = ddr3_get_topology_map(); 271 272 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 273 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 274 cs_count = 0; 275 cs_bitmask = tm->interface_params[if_id]. 276 as_bus_params[bus_cnt].cs_bitmask; 277 for (cs = 0; cs < MAX_CS_NUM; cs++) { 278 if ((cs_bitmask >> cs) & 1) 279 cs_count++; 280 } 281 282 if (curr_cs_num == 0) { 283 curr_cs_num = cs_count; 284 } else if (cs_count != curr_cs_num) { 285 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 286 ("CS number is different per bus (IF %d BUS %d cs_num %d curr_cs_num %d)\n", 287 if_id, bus_cnt, cs_count, 288 curr_cs_num)); 289 return MV_NOT_SUPPORTED; 290 } 291 } 292 *cs_num = curr_cs_num; 293 294 return MV_OK; 295 } 296 297 /* 298 * Init Controller Flow 299 */ 300 int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_prm) 301 { 302 u32 if_id; 303 u32 cs_num; 304 u32 t_refi = 0, t_hclk = 0, t_ckclk = 0, t_faw = 0, t_pd = 0, 305 t_wr = 0, t2t = 0, txpdll = 0; 306 u32 data_value = 0, bus_width = 0, page_size = 0, cs_cnt = 0, 307 mem_mask = 0, bus_index = 0; 308 enum hws_speed_bin speed_bin_index = SPEED_BIN_DDR_2133N; 309 enum hws_mem_size memory_size = MEM_2G; 310 enum hws_ddr_freq freq = init_freq; 311 enum hws_timing timing; 312 u32 cs_mask = 0; 313 u32 cl_value = 0, cwl_val = 0; 314 u32 refresh_interval_cnt = 0, bus_cnt = 0, adll_tap = 0; 315 enum hws_access_type access_type = ACCESS_TYPE_UNICAST; 316 u32 data_read[MAX_INTERFACE_NUM]; 317 struct hws_topology_map *tm = ddr3_get_topology_map(); 318 u32 odt_config = g_odt_config_2cs; 319 320 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 321 ("Init_controller, do_mrs_phy=%d, is_ctrl64_bit=%d\n", 322 init_cntr_prm->do_mrs_phy, 323 init_cntr_prm->is_ctrl64_bit)); 324 325 if (init_cntr_prm->init_phy == 1) { 326 CHECK_STATUS(ddr3_tip_configure_phy(dev_num)); 327 } 328 329 if (generic_init_controller == 1) { 330 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 331 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 332 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 333 ("active IF %d\n", if_id)); 334 mem_mask = 0; 335 for (bus_index = 0; 336 bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 337 bus_index++) { 338 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 339 mem_mask |= 340 tm->interface_params[if_id]. 341 as_bus_params[bus_index].mirror_enable_bitmask; 342 } 343 344 if (mem_mask != 0) { 345 CHECK_STATUS(ddr3_tip_if_write 346 (dev_num, ACCESS_TYPE_MULTICAST, 347 if_id, CS_ENABLE_REG, 0, 348 0x8)); 349 } 350 351 memory_size = 352 tm->interface_params[if_id]. 353 memory_size; 354 speed_bin_index = 355 tm->interface_params[if_id]. 356 speed_bin_index; 357 freq = init_freq; 358 t_refi = 359 (tm->interface_params[if_id]. 360 interface_temp == 361 HWS_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW; 362 t_refi *= 1000; /* psec */ 363 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 364 ("memy_size %d speed_bin_ind %d freq %d t_refi %d\n", 365 memory_size, speed_bin_index, freq, 366 t_refi)); 367 /* HCLK & CK CLK in 2:1[ps] */ 368 /* t_ckclk is external clock */ 369 t_ckclk = (MEGA / freq_val[freq]); 370 /* t_hclk is internal clock */ 371 t_hclk = 2 * t_ckclk; 372 refresh_interval_cnt = t_refi / t_hclk; /* no units */ 373 bus_width = 374 (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) 375 == 1) ? (16) : (32); 376 377 if (init_cntr_prm->is_ctrl64_bit) 378 bus_width = 64; 379 380 data_value = 381 (refresh_interval_cnt | 0x4000 | 382 ((bus_width == 383 32) ? 0x8000 : 0) | 0x1000000) & ~(1 << 26); 384 385 /* Interface Bus Width */ 386 /* SRMode */ 387 CHECK_STATUS(ddr3_tip_if_write 388 (dev_num, access_type, if_id, 389 SDRAM_CONFIGURATION_REG, data_value, 390 0x100ffff)); 391 392 /* Interleave first command pre-charge enable (TBD) */ 393 CHECK_STATUS(ddr3_tip_if_write 394 (dev_num, access_type, if_id, 395 SDRAM_OPEN_PAGE_CONTROL_REG, (1 << 10), 396 (1 << 10))); 397 398 /* PHY configuration */ 399 /* 400 * Postamble Length = 1.5cc, Addresscntl to clk skew 401 * \BD, Preamble length normal, parralal ADLL enable 402 */ 403 CHECK_STATUS(ddr3_tip_if_write 404 (dev_num, access_type, if_id, 405 DRAM_PHY_CONFIGURATION, 0x28, 0x3e)); 406 if (init_cntr_prm->is_ctrl64_bit) { 407 /* positive edge */ 408 CHECK_STATUS(ddr3_tip_if_write 409 (dev_num, access_type, if_id, 410 DRAM_PHY_CONFIGURATION, 0x0, 411 0xff80)); 412 } 413 414 /* calibration block disable */ 415 /* Xbar Read buffer select (for Internal access) */ 416 CHECK_STATUS(ddr3_tip_if_write 417 (dev_num, access_type, if_id, 418 CALIB_MACHINE_CTRL_REG, 0x1200c, 419 0x7dffe01c)); 420 CHECK_STATUS(ddr3_tip_if_write 421 (dev_num, access_type, if_id, 422 CALIB_MACHINE_CTRL_REG, 423 calibration_update_control << 3, 0x3 << 3)); 424 425 /* Pad calibration control - enable */ 426 CHECK_STATUS(ddr3_tip_if_write 427 (dev_num, access_type, if_id, 428 CALIB_MACHINE_CTRL_REG, 0x1, 0x1)); 429 430 cs_mask = 0; 431 data_value = 0x7; 432 /* 433 * Address ctrl \96 Part of the Generic code 434 * The next configuration is done: 435 * 1) Memory Size 436 * 2) Bus_width 437 * 3) CS# 438 * 4) Page Number 439 * 5) t_faw 440 * Per Dunit get from the Map_topology the parameters: 441 * Bus_width 442 * t_faw is per Dunit not per CS 443 */ 444 page_size = 445 (tm->interface_params[if_id]. 446 bus_width == 447 BUS_WIDTH_8) ? page_param[memory_size]. 448 page_size_8bit : page_param[memory_size]. 449 page_size_16bit; 450 451 t_faw = 452 (page_size == 1) ? speed_bin_table(speed_bin_index, 453 SPEED_BIN_TFAW1K) 454 : speed_bin_table(speed_bin_index, 455 SPEED_BIN_TFAW2K); 456 457 data_value = TIME_2_CLOCK_CYCLES(t_faw, t_ckclk); 458 data_value = data_value << 24; 459 CHECK_STATUS(ddr3_tip_if_write 460 (dev_num, access_type, if_id, 461 SDRAM_ACCESS_CONTROL_REG, data_value, 462 0x7f000000)); 463 464 data_value = 465 (tm->interface_params[if_id]. 466 bus_width == BUS_WIDTH_8) ? 0 : 1; 467 468 /* create merge cs mask for all cs available in dunit */ 469 for (bus_cnt = 0; 470 bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); 471 bus_cnt++) { 472 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 473 cs_mask |= 474 tm->interface_params[if_id]. 475 as_bus_params[bus_cnt].cs_bitmask; 476 } 477 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 478 ("Init_controller IF %d cs_mask %d\n", 479 if_id, cs_mask)); 480 /* 481 * Configure the next upon the Map Topology \96 If the 482 * Dunit is CS0 Configure CS0 if it is multi CS 483 * configure them both: The Bust_width it\92s the 484 * Memory Bus width \96 x8 or x16 485 */ 486 for (cs_cnt = 0; cs_cnt < NUM_OF_CS; cs_cnt++) { 487 ddr3_tip_configure_cs(dev_num, if_id, cs_cnt, 488 ((cs_mask & (1 << cs_cnt)) ? 1 489 : 0)); 490 } 491 492 if (init_cntr_prm->do_mrs_phy) { 493 /* 494 * MR0 \96 Part of the Generic code 495 * The next configuration is done: 496 * 1) Burst Length 497 * 2) CAS Latency 498 * get for each dunit what is it Speed_bin & 499 * Target Frequency. From those both parameters 500 * get the appropriate Cas_l from the CL table 501 */ 502 cl_value = 503 tm->interface_params[if_id]. 504 cas_l; 505 cwl_val = 506 tm->interface_params[if_id]. 507 cas_wl; 508 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 509 ("cl_value 0x%x cwl_val 0x%x\n", 510 cl_value, cwl_val)); 511 512 data_value = 513 ((cl_mask_table[cl_value] & 0x1) << 2) | 514 ((cl_mask_table[cl_value] & 0xe) << 3); 515 CHECK_STATUS(ddr3_tip_if_write 516 (dev_num, access_type, if_id, 517 MR0_REG, data_value, 518 (0x7 << 4) | (1 << 2))); 519 CHECK_STATUS(ddr3_tip_if_write 520 (dev_num, access_type, if_id, 521 MR0_REG, twr_mask_table[t_wr + 1], 522 0xe00)); 523 524 /* 525 * MR1: Set RTT and DIC Design GL values 526 * configured by user 527 */ 528 CHECK_STATUS(ddr3_tip_if_write 529 (dev_num, ACCESS_TYPE_MULTICAST, 530 PARAM_NOT_CARE, MR1_REG, 531 g_dic | g_rtt_nom, 0x266)); 532 533 /* MR2 - Part of the Generic code */ 534 /* 535 * The next configuration is done: 536 * 1) SRT 537 * 2) CAS Write Latency 538 */ 539 data_value = (cwl_mask_table[cwl_val] << 3); 540 data_value |= 541 ((tm->interface_params[if_id]. 542 interface_temp == 543 HWS_TEMP_HIGH) ? (1 << 7) : 0); 544 CHECK_STATUS(ddr3_tip_if_write 545 (dev_num, access_type, if_id, 546 MR2_REG, data_value, 547 (0x7 << 3) | (0x1 << 7) | (0x3 << 548 9))); 549 } 550 551 ddr3_tip_write_odt(dev_num, access_type, if_id, 552 cl_value, cwl_val); 553 ddr3_tip_set_timing(dev_num, access_type, if_id, freq); 554 555 CHECK_STATUS(ddr3_tip_if_write 556 (dev_num, access_type, if_id, 557 DUNIT_CONTROL_HIGH_REG, 0x177, 558 0x1000177)); 559 560 if (init_cntr_prm->is_ctrl64_bit) { 561 /* disable 0.25 cc delay */ 562 CHECK_STATUS(ddr3_tip_if_write 563 (dev_num, access_type, if_id, 564 DUNIT_CONTROL_HIGH_REG, 0x0, 565 0x800)); 566 } 567 568 /* reset bit 7 */ 569 CHECK_STATUS(ddr3_tip_if_write 570 (dev_num, access_type, if_id, 571 DUNIT_CONTROL_HIGH_REG, 572 (init_cntr_prm->msys_init << 7), (1 << 7))); 573 574 /* calculate number of CS (per interface) */ 575 CHECK_STATUS(calc_cs_num 576 (dev_num, if_id, &cs_num)); 577 timing = tm->interface_params[if_id].timing; 578 579 if (mode2_t != 0xff) { 580 t2t = mode2_t; 581 } else if (timing != HWS_TIM_DEFAULT) { 582 /* Board topology map is forcing timing */ 583 t2t = (timing == HWS_TIM_2T) ? 1 : 0; 584 } else { 585 t2t = (cs_num == 1) ? 0 : 1; 586 } 587 588 CHECK_STATUS(ddr3_tip_if_write 589 (dev_num, access_type, if_id, 590 DDR_CONTROL_LOW_REG, t2t << 3, 591 0x3 << 3)); 592 /* move the block to ddr3_tip_set_timing - start */ 593 t_pd = GET_MAX_VALUE(t_ckclk * 3, 594 speed_bin_table(speed_bin_index, 595 SPEED_BIN_TPD)); 596 t_pd = TIME_2_CLOCK_CYCLES(t_pd, t_ckclk); 597 txpdll = GET_MAX_VALUE(t_ckclk * 10, 24); 598 txpdll = CEIL_DIVIDE((txpdll - 1), t_ckclk); 599 CHECK_STATUS(ddr3_tip_if_write 600 (dev_num, access_type, if_id, 601 DDR_TIMING_REG, txpdll << 4, 602 0x1f << 4)); 603 CHECK_STATUS(ddr3_tip_if_write 604 (dev_num, access_type, if_id, 605 DDR_TIMING_REG, 0x28 << 9, 0x3f << 9)); 606 CHECK_STATUS(ddr3_tip_if_write 607 (dev_num, access_type, if_id, 608 DDR_TIMING_REG, 0xa << 21, 0xff << 21)); 609 610 /* move the block to ddr3_tip_set_timing - end */ 611 /* AUTO_ZQC_TIMING */ 612 CHECK_STATUS(ddr3_tip_if_write 613 (dev_num, access_type, if_id, 614 TIMING_REG, (AUTO_ZQC_TIMING | (2 << 20)), 615 0x3fffff)); 616 CHECK_STATUS(ddr3_tip_if_read 617 (dev_num, access_type, if_id, 618 DRAM_PHY_CONFIGURATION, data_read, 0x30)); 619 data_value = 620 (data_read[if_id] == 0) ? (1 << 11) : 0; 621 CHECK_STATUS(ddr3_tip_if_write 622 (dev_num, access_type, if_id, 623 DUNIT_CONTROL_HIGH_REG, data_value, 624 (1 << 11))); 625 626 /* Set Active control for ODT write transactions */ 627 if (cs_num == 1) 628 odt_config = g_odt_config_1cs; 629 CHECK_STATUS(ddr3_tip_if_write 630 (dev_num, ACCESS_TYPE_MULTICAST, 631 PARAM_NOT_CARE, 0x1494, odt_config, 632 MASK_ALL_BITS)); 633 } 634 } else { 635 #ifdef STATIC_ALGO_SUPPORT 636 CHECK_STATUS(ddr3_tip_static_init_controller(dev_num)); 637 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X) 638 CHECK_STATUS(ddr3_tip_static_phy_init_controller(dev_num)); 639 #endif 640 #endif /* STATIC_ALGO_SUPPORT */ 641 } 642 643 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 644 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 645 CHECK_STATUS(ddr3_tip_rank_control(dev_num, if_id)); 646 647 if (init_cntr_prm->do_mrs_phy) { 648 CHECK_STATUS(ddr3_tip_pad_inv(dev_num, if_id)); 649 } 650 651 /* Pad calibration control - disable */ 652 CHECK_STATUS(ddr3_tip_if_write 653 (dev_num, access_type, if_id, 654 CALIB_MACHINE_CTRL_REG, 0x0, 0x1)); 655 CHECK_STATUS(ddr3_tip_if_write 656 (dev_num, access_type, if_id, 657 CALIB_MACHINE_CTRL_REG, 658 calibration_update_control << 3, 0x3 << 3)); 659 } 660 661 CHECK_STATUS(ddr3_tip_enable_init_sequence(dev_num)); 662 663 if (delay_enable != 0) { 664 adll_tap = MEGA / (freq_val[freq] * 64); 665 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 666 } 667 668 return MV_OK; 669 } 670 671 /* 672 * Load Topology map 673 */ 674 int hws_ddr3_tip_load_topology_map(u32 dev_num, struct hws_topology_map *tm) 675 { 676 enum hws_speed_bin speed_bin_index; 677 enum hws_ddr_freq freq = DDR_FREQ_LIMIT; 678 u32 if_id; 679 680 freq_val[DDR_FREQ_LOW_FREQ] = dfs_low_freq; 681 tm = ddr3_get_topology_map(); 682 CHECK_STATUS(ddr3_tip_get_first_active_if 683 ((u8)dev_num, tm->if_act_mask, 684 &first_active_if)); 685 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 686 ("board IF_Mask=0x%x num_of_bus_per_interface=0x%x\n", 687 tm->if_act_mask, 688 tm->num_of_bus_per_interface)); 689 690 /* 691 * if CL, CWL values are missing in topology map, then fill them 692 * according to speedbin tables 693 */ 694 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 695 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 696 speed_bin_index = 697 tm->interface_params[if_id].speed_bin_index; 698 /* TBD memory frequency of interface 0 only is used ! */ 699 freq = tm->interface_params[first_active_if].memory_freq; 700 701 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 702 ("speed_bin_index =%d freq=%d cl=%d cwl=%d\n", 703 speed_bin_index, freq_val[freq], 704 tm->interface_params[if_id]. 705 cas_l, 706 tm->interface_params[if_id]. 707 cas_wl)); 708 709 if (tm->interface_params[if_id].cas_l == 0) { 710 tm->interface_params[if_id].cas_l = 711 cas_latency_table[speed_bin_index].cl_val[freq]; 712 } 713 714 if (tm->interface_params[if_id].cas_wl == 0) { 715 tm->interface_params[if_id].cas_wl = 716 cas_write_latency_table[speed_bin_index].cl_val[freq]; 717 } 718 } 719 720 return MV_OK; 721 } 722 723 /* 724 * RANK Control Flow 725 */ 726 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id) 727 { 728 u32 data_value = 0, bus_cnt; 729 struct hws_topology_map *tm = ddr3_get_topology_map(); 730 731 for (bus_cnt = 1; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 732 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 733 if ((tm->interface_params[if_id]. 734 as_bus_params[0].cs_bitmask != 735 tm->interface_params[if_id]. 736 as_bus_params[bus_cnt].cs_bitmask) || 737 (tm->interface_params[if_id]. 738 as_bus_params[0].mirror_enable_bitmask != 739 tm->interface_params[if_id]. 740 as_bus_params[bus_cnt].mirror_enable_bitmask)) 741 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 742 ("WARNING:Wrong configuration for pup #%d CS mask and CS mirroring for all pups should be the same\n", 743 bus_cnt)); 744 } 745 746 data_value |= tm->interface_params[if_id]. 747 as_bus_params[0].cs_bitmask; 748 data_value |= tm->interface_params[if_id]. 749 as_bus_params[0].mirror_enable_bitmask << 4; 750 751 CHECK_STATUS(ddr3_tip_if_write 752 (dev_num, ACCESS_TYPE_UNICAST, if_id, RANK_CTRL_REG, 753 data_value, 0xff)); 754 755 return MV_OK; 756 } 757 758 /* 759 * PAD Inverse Flow 760 */ 761 static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id) 762 { 763 u32 bus_cnt, data_value, ck_swap_pup_ctrl; 764 struct hws_topology_map *tm = ddr3_get_topology_map(); 765 766 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 767 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 768 if (tm->interface_params[if_id]. 769 as_bus_params[bus_cnt].is_dqs_swap == 1) { 770 /* dqs swap */ 771 ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST, 772 if_id, bus_cnt, 773 DDR_PHY_DATA, 774 PHY_CONTROL_PHY_REG, 0xc0, 775 0xc0); 776 } 777 778 if (tm->interface_params[if_id]. 779 as_bus_params[bus_cnt].is_ck_swap == 1) { 780 if (bus_cnt <= 1) 781 data_value = 0x5 << 2; 782 else 783 data_value = 0xa << 2; 784 785 /* mask equals data */ 786 /* ck swap pup is only control pup #0 ! */ 787 ck_swap_pup_ctrl = 0; 788 ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST, 789 if_id, ck_swap_pup_ctrl, 790 DDR_PHY_CONTROL, 791 PHY_CONTROL_PHY_REG, 792 data_value, data_value); 793 } 794 } 795 796 return MV_OK; 797 } 798 799 /* 800 * Run Training Flow 801 */ 802 int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type) 803 { 804 int ret = MV_OK, ret_tune = MV_OK; 805 806 #ifdef ODT_TEST_SUPPORT 807 if (finger_test == 1) 808 return odt_test(dev_num, algo_type); 809 #endif 810 811 if (algo_type == ALGO_TYPE_DYNAMIC) { 812 ret = ddr3_tip_ddr3_auto_tune(dev_num); 813 } else { 814 #ifdef STATIC_ALGO_SUPPORT 815 { 816 enum hws_ddr_freq freq; 817 freq = init_freq; 818 819 /* add to mask */ 820 if (is_adll_calib_before_init != 0) { 821 printf("with adll calib before init\n"); 822 adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 823 0, freq); 824 } 825 /* 826 * Frequency per interface is not relevant, 827 * only interface 0 828 */ 829 ret = ddr3_tip_run_static_alg(dev_num, 830 freq); 831 } 832 #endif 833 } 834 835 if (ret != MV_OK) { 836 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 837 ("Run_alg: tuning failed %d\n", ret_tune)); 838 } 839 840 return ret; 841 } 842 843 #ifdef ODT_TEST_SUPPORT 844 /* 845 * ODT Test 846 */ 847 static int odt_test(u32 dev_num, enum hws_algo_type algo_type) 848 { 849 int ret = MV_OK, ret_tune = MV_OK; 850 int pfinger_val = 0, nfinger_val; 851 852 for (pfinger_val = p_finger_start; pfinger_val <= p_finger_end; 853 pfinger_val += p_finger_step) { 854 for (nfinger_val = n_finger_start; nfinger_val <= n_finger_end; 855 nfinger_val += n_finger_step) { 856 if (finger_test != 0) { 857 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 858 ("pfinger_val %d nfinger_val %d\n", 859 pfinger_val, nfinger_val)); 860 p_finger = pfinger_val; 861 n_finger = nfinger_val; 862 } 863 864 if (algo_type == ALGO_TYPE_DYNAMIC) { 865 ret = ddr3_tip_ddr3_auto_tune(dev_num); 866 } else { 867 /* 868 * Frequency per interface is not relevant, 869 * only interface 0 870 */ 871 ret = ddr3_tip_run_static_alg(dev_num, 872 init_freq); 873 } 874 } 875 } 876 877 if (ret_tune != MV_OK) { 878 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 879 ("Run_alg: tuning failed %d\n", ret_tune)); 880 ret = (ret == MV_OK) ? ret_tune : ret; 881 } 882 883 return ret; 884 } 885 #endif 886 887 /* 888 * Select Controller 889 */ 890 int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable) 891 { 892 if (config_func_info[dev_num].tip_dunit_mux_select_func != NULL) { 893 return config_func_info[dev_num]. 894 tip_dunit_mux_select_func((u8)dev_num, enable); 895 } 896 897 return MV_FAIL; 898 } 899 900 /* 901 * Dunit Register Write 902 */ 903 int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access, 904 u32 if_id, u32 reg_addr, u32 data_value, u32 mask) 905 { 906 if (config_func_info[dev_num].tip_dunit_write_func != NULL) { 907 return config_func_info[dev_num]. 908 tip_dunit_write_func((u8)dev_num, interface_access, 909 if_id, reg_addr, 910 data_value, mask); 911 } 912 913 return MV_FAIL; 914 } 915 916 /* 917 * Dunit Register Read 918 */ 919 int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access, 920 u32 if_id, u32 reg_addr, u32 *data, u32 mask) 921 { 922 if (config_func_info[dev_num].tip_dunit_read_func != NULL) { 923 return config_func_info[dev_num]. 924 tip_dunit_read_func((u8)dev_num, interface_access, 925 if_id, reg_addr, 926 data, mask); 927 } 928 929 return MV_FAIL; 930 } 931 932 /* 933 * Dunit Register Polling 934 */ 935 int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type, 936 u32 if_id, u32 exp_value, u32 mask, u32 offset, 937 u32 poll_tries) 938 { 939 u32 poll_cnt = 0, interface_num = 0, start_if, end_if; 940 u32 read_data[MAX_INTERFACE_NUM]; 941 int ret; 942 int is_fail = 0, is_if_fail; 943 struct hws_topology_map *tm = ddr3_get_topology_map(); 944 945 if (access_type == ACCESS_TYPE_MULTICAST) { 946 start_if = 0; 947 end_if = MAX_INTERFACE_NUM - 1; 948 } else { 949 start_if = if_id; 950 end_if = if_id; 951 } 952 953 for (interface_num = start_if; interface_num <= end_if; interface_num++) { 954 /* polling bit 3 for n times */ 955 VALIDATE_ACTIVE(tm->if_act_mask, interface_num); 956 957 is_if_fail = 0; 958 for (poll_cnt = 0; poll_cnt < poll_tries; poll_cnt++) { 959 ret = 960 ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, 961 interface_num, offset, read_data, 962 mask); 963 if (ret != MV_OK) 964 return ret; 965 966 if (read_data[interface_num] == exp_value) 967 break; 968 } 969 970 if (poll_cnt >= poll_tries) { 971 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 972 ("max poll IF #%d\n", interface_num)); 973 is_fail = 1; 974 is_if_fail = 1; 975 } 976 977 training_result[training_stage][interface_num] = 978 (is_if_fail == 1) ? TEST_FAILED : TEST_SUCCESS; 979 } 980 981 return (is_fail == 0) ? MV_OK : MV_FAIL; 982 } 983 984 /* 985 * Bus read access 986 */ 987 int ddr3_tip_bus_read(u32 dev_num, u32 if_id, 988 enum hws_access_type phy_access, u32 phy_id, 989 enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data) 990 { 991 u32 bus_index = 0; 992 u32 data_read[MAX_INTERFACE_NUM]; 993 struct hws_topology_map *tm = ddr3_get_topology_map(); 994 995 if (phy_access == ACCESS_TYPE_MULTICAST) { 996 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 997 bus_index++) { 998 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 999 CHECK_STATUS(ddr3_tip_bus_access 1000 (dev_num, ACCESS_TYPE_UNICAST, 1001 if_id, ACCESS_TYPE_UNICAST, 1002 bus_index, phy_type, reg_addr, 0, 1003 OPERATION_READ)); 1004 CHECK_STATUS(ddr3_tip_if_read 1005 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1006 PHY_REG_FILE_ACCESS, data_read, 1007 MASK_ALL_BITS)); 1008 data[bus_index] = (data_read[if_id] & 0xffff); 1009 } 1010 } else { 1011 CHECK_STATUS(ddr3_tip_bus_access 1012 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1013 phy_access, phy_id, phy_type, reg_addr, 0, 1014 OPERATION_READ)); 1015 CHECK_STATUS(ddr3_tip_if_read 1016 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1017 PHY_REG_FILE_ACCESS, data_read, MASK_ALL_BITS)); 1018 1019 /* 1020 * only 16 lsb bit are valid in Phy (each register is different, 1021 * some can actually be less than 16 bits) 1022 */ 1023 *data = (data_read[if_id] & 0xffff); 1024 } 1025 1026 return MV_OK; 1027 } 1028 1029 /* 1030 * Bus write access 1031 */ 1032 int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type interface_access, 1033 u32 if_id, enum hws_access_type phy_access, 1034 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1035 u32 data_value) 1036 { 1037 CHECK_STATUS(ddr3_tip_bus_access 1038 (dev_num, interface_access, if_id, phy_access, 1039 phy_id, phy_type, reg_addr, data_value, OPERATION_WRITE)); 1040 1041 return MV_OK; 1042 } 1043 1044 /* 1045 * Bus access routine (relevant for both read & write) 1046 */ 1047 static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access, 1048 u32 if_id, enum hws_access_type phy_access, 1049 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1050 u32 data_value, enum hws_operation oper_type) 1051 { 1052 u32 addr_low = 0x3f & reg_addr; 1053 u32 addr_hi = ((0xc0 & reg_addr) >> 6); 1054 u32 data_p1 = 1055 (oper_type << 30) + (addr_hi << 28) + (phy_access << 27) + 1056 (phy_type << 26) + (phy_id << 22) + (addr_low << 16) + 1057 (data_value & 0xffff); 1058 u32 data_p2 = data_p1 + (1 << 31); 1059 u32 start_if, end_if; 1060 struct hws_topology_map *tm = ddr3_get_topology_map(); 1061 1062 CHECK_STATUS(ddr3_tip_if_write 1063 (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS, 1064 data_p1, MASK_ALL_BITS)); 1065 CHECK_STATUS(ddr3_tip_if_write 1066 (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS, 1067 data_p2, MASK_ALL_BITS)); 1068 1069 if (interface_access == ACCESS_TYPE_UNICAST) { 1070 start_if = if_id; 1071 end_if = if_id; 1072 } else { 1073 start_if = 0; 1074 end_if = MAX_INTERFACE_NUM - 1; 1075 } 1076 1077 /* polling for read/write execution done */ 1078 for (if_id = start_if; if_id <= end_if; if_id++) { 1079 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1080 CHECK_STATUS(is_bus_access_done 1081 (dev_num, if_id, PHY_REG_FILE_ACCESS, 31)); 1082 } 1083 1084 return MV_OK; 1085 } 1086 1087 /* 1088 * Check bus access done 1089 */ 1090 static int is_bus_access_done(u32 dev_num, u32 if_id, u32 dunit_reg_adrr, 1091 u32 bit) 1092 { 1093 u32 rd_data = 1; 1094 u32 cnt = 0; 1095 u32 data_read[MAX_INTERFACE_NUM]; 1096 1097 CHECK_STATUS(ddr3_tip_if_read 1098 (dev_num, ACCESS_TYPE_UNICAST, if_id, dunit_reg_adrr, 1099 data_read, MASK_ALL_BITS)); 1100 rd_data = data_read[if_id]; 1101 rd_data &= (1 << bit); 1102 1103 while (rd_data != 0) { 1104 if (cnt++ >= MAX_POLLING_ITERATIONS) 1105 break; 1106 1107 CHECK_STATUS(ddr3_tip_if_read 1108 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1109 dunit_reg_adrr, data_read, MASK_ALL_BITS)); 1110 rd_data = data_read[if_id]; 1111 rd_data &= (1 << bit); 1112 } 1113 1114 if (cnt < MAX_POLLING_ITERATIONS) 1115 return MV_OK; 1116 else 1117 return MV_FAIL; 1118 } 1119 1120 /* 1121 * Phy read-modify-write 1122 */ 1123 int ddr3_tip_bus_read_modify_write(u32 dev_num, enum hws_access_type access_type, 1124 u32 interface_id, u32 phy_id, 1125 enum hws_ddr_phy phy_type, u32 reg_addr, 1126 u32 data_value, u32 reg_mask) 1127 { 1128 u32 data_val = 0, if_id, start_if, end_if; 1129 struct hws_topology_map *tm = ddr3_get_topology_map(); 1130 1131 if (access_type == ACCESS_TYPE_MULTICAST) { 1132 start_if = 0; 1133 end_if = MAX_INTERFACE_NUM - 1; 1134 } else { 1135 start_if = interface_id; 1136 end_if = interface_id; 1137 } 1138 1139 for (if_id = start_if; if_id <= end_if; if_id++) { 1140 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1141 CHECK_STATUS(ddr3_tip_bus_read 1142 (dev_num, if_id, ACCESS_TYPE_UNICAST, phy_id, 1143 phy_type, reg_addr, &data_val)); 1144 data_value = (data_val & (~reg_mask)) | (data_value & reg_mask); 1145 CHECK_STATUS(ddr3_tip_bus_write 1146 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1147 ACCESS_TYPE_UNICAST, phy_id, phy_type, reg_addr, 1148 data_value)); 1149 } 1150 1151 return MV_OK; 1152 } 1153 1154 /* 1155 * ADLL Calibration 1156 */ 1157 int adll_calibration(u32 dev_num, enum hws_access_type access_type, 1158 u32 if_id, enum hws_ddr_freq frequency) 1159 { 1160 struct hws_tip_freq_config_info freq_config_info; 1161 u32 bus_cnt = 0; 1162 struct hws_topology_map *tm = ddr3_get_topology_map(); 1163 1164 /* Reset Diver_b assert -> de-assert */ 1165 CHECK_STATUS(ddr3_tip_if_write 1166 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1167 0, 0x10000000)); 1168 mdelay(10); 1169 CHECK_STATUS(ddr3_tip_if_write 1170 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1171 0x10000000, 0x10000000)); 1172 1173 if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) { 1174 CHECK_STATUS(config_func_info[dev_num]. 1175 tip_get_freq_config_info_func((u8)dev_num, frequency, 1176 &freq_config_info)); 1177 } else { 1178 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1179 ("tip_get_freq_config_info_func is NULL")); 1180 return MV_NOT_INITIALIZED; 1181 } 1182 1183 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 1184 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 1185 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1186 (dev_num, access_type, if_id, bus_cnt, 1187 DDR_PHY_DATA, BW_PHY_REG, 1188 freq_config_info.bw_per_freq << 8, 0x700)); 1189 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1190 (dev_num, access_type, if_id, bus_cnt, 1191 DDR_PHY_DATA, RATE_PHY_REG, 1192 freq_config_info.rate_per_freq, 0x7)); 1193 } 1194 1195 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1196 CHECK_STATUS(ddr3_tip_if_write 1197 (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION, 1198 0, (0x80000000 | 0x40000000))); 1199 mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ])); 1200 CHECK_STATUS(ddr3_tip_if_write 1201 (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION, 1202 (0x80000000 | 0x40000000), (0x80000000 | 0x40000000))); 1203 1204 /* polling for ADLL Done */ 1205 if (ddr3_tip_if_polling(dev_num, access_type, if_id, 1206 0x3ff03ff, 0x3ff03ff, PHY_LOCK_STATUS_REG, 1207 MAX_POLLING_ITERATIONS) != MV_OK) { 1208 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1209 ("Freq_set: DDR3 poll failed(1)")); 1210 } 1211 1212 /* pup data_pup reset assert-> deassert */ 1213 CHECK_STATUS(ddr3_tip_if_write 1214 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1215 0, 0x60000000)); 1216 mdelay(10); 1217 CHECK_STATUS(ddr3_tip_if_write 1218 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1219 0x60000000, 0x60000000)); 1220 1221 return MV_OK; 1222 } 1223 1224 int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type, 1225 u32 if_id, enum hws_ddr_freq frequency) 1226 { 1227 u32 cl_value = 0, cwl_value = 0, mem_mask = 0, val = 0, 1228 bus_cnt = 0, t_hclk = 0, t_wr = 0, 1229 refresh_interval_cnt = 0, cnt_id; 1230 u32 t_refi = 0, end_if, start_if; 1231 u32 bus_index = 0; 1232 int is_dll_off = 0; 1233 enum hws_speed_bin speed_bin_index = 0; 1234 struct hws_tip_freq_config_info freq_config_info; 1235 enum hws_result *flow_result = training_result[training_stage]; 1236 u32 adll_tap = 0; 1237 u32 cs_mask[MAX_INTERFACE_NUM]; 1238 struct hws_topology_map *tm = ddr3_get_topology_map(); 1239 1240 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1241 ("dev %d access %d IF %d freq %d\n", dev_num, 1242 access_type, if_id, frequency)); 1243 1244 if (frequency == DDR_FREQ_LOW_FREQ) 1245 is_dll_off = 1; 1246 if (access_type == ACCESS_TYPE_MULTICAST) { 1247 start_if = 0; 1248 end_if = MAX_INTERFACE_NUM - 1; 1249 } else { 1250 start_if = if_id; 1251 end_if = if_id; 1252 } 1253 1254 /* calculate interface cs mask - Oferb 4/11 */ 1255 /* speed bin can be different for each interface */ 1256 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1257 /* cs enable is active low */ 1258 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1259 cs_mask[if_id] = CS_BIT_MASK; 1260 training_result[training_stage][if_id] = TEST_SUCCESS; 1261 ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs, 1262 &cs_mask[if_id]); 1263 } 1264 1265 /* speed bin can be different for each interface */ 1266 /* 1267 * moti b - need to remove the loop for multicas access functions 1268 * and loop the unicast access functions 1269 */ 1270 for (if_id = start_if; if_id <= end_if; if_id++) { 1271 if (IS_ACTIVE(tm->if_act_mask, if_id) == 0) 1272 continue; 1273 1274 flow_result[if_id] = TEST_SUCCESS; 1275 speed_bin_index = 1276 tm->interface_params[if_id].speed_bin_index; 1277 if (tm->interface_params[if_id].memory_freq == 1278 frequency) { 1279 cl_value = 1280 tm->interface_params[if_id].cas_l; 1281 cwl_value = 1282 tm->interface_params[if_id].cas_wl; 1283 } else { 1284 cl_value = 1285 cas_latency_table[speed_bin_index].cl_val[frequency]; 1286 cwl_value = 1287 cas_write_latency_table[speed_bin_index]. 1288 cl_val[frequency]; 1289 } 1290 1291 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1292 ("Freq_set dev 0x%x access 0x%x if 0x%x freq 0x%x speed %d:\n\t", 1293 dev_num, access_type, if_id, 1294 frequency, speed_bin_index)); 1295 1296 for (cnt_id = 0; cnt_id < DDR_FREQ_LIMIT; cnt_id++) { 1297 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1298 ("%d ", 1299 cas_latency_table[speed_bin_index]. 1300 cl_val[cnt_id])); 1301 } 1302 1303 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("\n")); 1304 mem_mask = 0; 1305 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 1306 bus_index++) { 1307 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 1308 mem_mask |= 1309 tm->interface_params[if_id]. 1310 as_bus_params[bus_index].mirror_enable_bitmask; 1311 } 1312 1313 if (mem_mask != 0) { 1314 /* motib redundant in KW28 */ 1315 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1316 if_id, 1317 CS_ENABLE_REG, 0, 0x8)); 1318 } 1319 1320 /* dll state after exiting SR */ 1321 if (is_dll_off == 1) { 1322 CHECK_STATUS(ddr3_tip_if_write 1323 (dev_num, access_type, if_id, 1324 DFS_REG, 0x1, 0x1)); 1325 } else { 1326 CHECK_STATUS(ddr3_tip_if_write 1327 (dev_num, access_type, if_id, 1328 DFS_REG, 0, 0x1)); 1329 } 1330 1331 CHECK_STATUS(ddr3_tip_if_write 1332 (dev_num, access_type, if_id, 1333 DUNIT_MMASK_REG, 0, 0x1)); 1334 /* DFS - block transactions */ 1335 CHECK_STATUS(ddr3_tip_if_write 1336 (dev_num, access_type, if_id, 1337 DFS_REG, 0x2, 0x2)); 1338 1339 /* disable ODT in case of dll off */ 1340 if (is_dll_off == 1) { 1341 CHECK_STATUS(ddr3_tip_if_write 1342 (dev_num, access_type, if_id, 1343 0x1874, 0, 0x244)); 1344 CHECK_STATUS(ddr3_tip_if_write 1345 (dev_num, access_type, if_id, 1346 0x1884, 0, 0x244)); 1347 CHECK_STATUS(ddr3_tip_if_write 1348 (dev_num, access_type, if_id, 1349 0x1894, 0, 0x244)); 1350 CHECK_STATUS(ddr3_tip_if_write 1351 (dev_num, access_type, if_id, 1352 0x18a4, 0, 0x244)); 1353 } 1354 1355 /* DFS - Enter Self-Refresh */ 1356 CHECK_STATUS(ddr3_tip_if_write 1357 (dev_num, access_type, if_id, DFS_REG, 0x4, 1358 0x4)); 1359 /* polling on self refresh entry */ 1360 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, 1361 if_id, 0x8, 0x8, DFS_REG, 1362 MAX_POLLING_ITERATIONS) != MV_OK) { 1363 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1364 ("Freq_set: DDR3 poll failed on SR entry\n")); 1365 } 1366 1367 /* PLL configuration */ 1368 if (config_func_info[dev_num].tip_set_freq_divider_func != NULL) { 1369 config_func_info[dev_num]. 1370 tip_set_freq_divider_func(dev_num, if_id, 1371 frequency); 1372 } 1373 1374 /* PLL configuration End */ 1375 1376 /* adjust t_refi to new frequency */ 1377 t_refi = (tm->interface_params[if_id].interface_temp == 1378 HWS_TEMP_HIGH) ? TREFI_LOW : TREFI_HIGH; 1379 t_refi *= 1000; /*psec */ 1380 1381 /* HCLK in[ps] */ 1382 t_hclk = MEGA / (freq_val[frequency] / 2); 1383 refresh_interval_cnt = t_refi / t_hclk; /* no units */ 1384 val = 0x4000 | refresh_interval_cnt; 1385 CHECK_STATUS(ddr3_tip_if_write 1386 (dev_num, access_type, if_id, 1387 SDRAM_CONFIGURATION_REG, val, 0x7fff)); 1388 1389 /* DFS - CL/CWL/WR parameters after exiting SR */ 1390 CHECK_STATUS(ddr3_tip_if_write 1391 (dev_num, access_type, if_id, DFS_REG, 1392 (cl_mask_table[cl_value] << 8), 0xf00)); 1393 CHECK_STATUS(ddr3_tip_if_write 1394 (dev_num, access_type, if_id, DFS_REG, 1395 (cwl_mask_table[cwl_value] << 12), 0x7000)); 1396 t_wr = speed_bin_table(speed_bin_index, SPEED_BIN_TWR); 1397 t_wr = (t_wr / 1000); 1398 CHECK_STATUS(ddr3_tip_if_write 1399 (dev_num, access_type, if_id, DFS_REG, 1400 (twr_mask_table[t_wr + 1] << 16), 0x70000)); 1401 1402 /* Restore original RTT values if returning from DLL OFF mode */ 1403 if (is_dll_off == 1) { 1404 CHECK_STATUS(ddr3_tip_if_write 1405 (dev_num, access_type, if_id, 0x1874, 1406 g_dic | g_rtt_nom, 0x266)); 1407 CHECK_STATUS(ddr3_tip_if_write 1408 (dev_num, access_type, if_id, 0x1884, 1409 g_dic | g_rtt_nom, 0x266)); 1410 CHECK_STATUS(ddr3_tip_if_write 1411 (dev_num, access_type, if_id, 0x1894, 1412 g_dic | g_rtt_nom, 0x266)); 1413 CHECK_STATUS(ddr3_tip_if_write 1414 (dev_num, access_type, if_id, 0x18a4, 1415 g_dic | g_rtt_nom, 0x266)); 1416 } 1417 1418 /* Reset Diver_b assert -> de-assert */ 1419 CHECK_STATUS(ddr3_tip_if_write 1420 (dev_num, access_type, if_id, 1421 SDRAM_CONFIGURATION_REG, 0, 0x10000000)); 1422 mdelay(10); 1423 CHECK_STATUS(ddr3_tip_if_write 1424 (dev_num, access_type, if_id, 1425 SDRAM_CONFIGURATION_REG, 0x10000000, 0x10000000)); 1426 1427 /* Adll configuration function of process and Frequency */ 1428 if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) { 1429 CHECK_STATUS(config_func_info[dev_num]. 1430 tip_get_freq_config_info_func(dev_num, frequency, 1431 &freq_config_info)); 1432 } 1433 /* TBD check milo5 using device ID ? */ 1434 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); 1435 bus_cnt++) { 1436 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 1437 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1438 (dev_num, ACCESS_TYPE_UNICAST, 1439 if_id, bus_cnt, DDR_PHY_DATA, 1440 0x92, 1441 freq_config_info. 1442 bw_per_freq << 8 1443 /*freq_mask[dev_num][frequency] << 8 */ 1444 , 0x700)); 1445 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1446 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1447 bus_cnt, DDR_PHY_DATA, 0x94, 1448 freq_config_info.rate_per_freq, 0x7)); 1449 } 1450 1451 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1452 CHECK_STATUS(ddr3_tip_if_write 1453 (dev_num, access_type, if_id, 1454 DRAM_PHY_CONFIGURATION, 0, 1455 (0x80000000 | 0x40000000))); 1456 mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ])); 1457 CHECK_STATUS(ddr3_tip_if_write 1458 (dev_num, access_type, if_id, 1459 DRAM_PHY_CONFIGURATION, (0x80000000 | 0x40000000), 1460 (0x80000000 | 0x40000000))); 1461 1462 /* polling for ADLL Done */ 1463 if (ddr3_tip_if_polling 1464 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3ff03ff, 1465 0x3ff03ff, PHY_LOCK_STATUS_REG, 1466 MAX_POLLING_ITERATIONS) != MV_OK) { 1467 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1468 ("Freq_set: DDR3 poll failed(1)\n")); 1469 } 1470 1471 /* pup data_pup reset assert-> deassert */ 1472 CHECK_STATUS(ddr3_tip_if_write 1473 (dev_num, access_type, if_id, 1474 SDRAM_CONFIGURATION_REG, 0, 0x60000000)); 1475 mdelay(10); 1476 CHECK_STATUS(ddr3_tip_if_write 1477 (dev_num, access_type, if_id, 1478 SDRAM_CONFIGURATION_REG, 0x60000000, 0x60000000)); 1479 1480 /* Set proper timing params before existing Self-Refresh */ 1481 ddr3_tip_set_timing(dev_num, access_type, if_id, frequency); 1482 if (delay_enable != 0) { 1483 adll_tap = MEGA / (freq_val[frequency] * 64); 1484 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 1485 } 1486 1487 /* Exit SR */ 1488 CHECK_STATUS(ddr3_tip_if_write 1489 (dev_num, access_type, if_id, DFS_REG, 0, 1490 0x4)); 1491 if (ddr3_tip_if_polling 1492 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x8, DFS_REG, 1493 MAX_POLLING_ITERATIONS) != MV_OK) { 1494 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1495 ("Freq_set: DDR3 poll failed(2)")); 1496 } 1497 1498 /* Refresh Command */ 1499 CHECK_STATUS(ddr3_tip_if_write 1500 (dev_num, access_type, if_id, 1501 SDRAM_OPERATION_REG, 0x2, 0xf1f)); 1502 if (ddr3_tip_if_polling 1503 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, 1504 SDRAM_OPERATION_REG, MAX_POLLING_ITERATIONS) != MV_OK) { 1505 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1506 ("Freq_set: DDR3 poll failed(3)")); 1507 } 1508 1509 /* Release DFS Block */ 1510 CHECK_STATUS(ddr3_tip_if_write 1511 (dev_num, access_type, if_id, DFS_REG, 0, 1512 0x2)); 1513 /* Controller to MBUS Retry - normal */ 1514 CHECK_STATUS(ddr3_tip_if_write 1515 (dev_num, access_type, if_id, DUNIT_MMASK_REG, 1516 0x1, 0x1)); 1517 1518 /* MRO: Burst Length 8, CL , Auto_precharge 0x16cc */ 1519 val = 1520 ((cl_mask_table[cl_value] & 0x1) << 2) | 1521 ((cl_mask_table[cl_value] & 0xe) << 3); 1522 CHECK_STATUS(ddr3_tip_if_write 1523 (dev_num, access_type, if_id, MR0_REG, 1524 val, (0x7 << 4) | (1 << 2))); 1525 /* MR2: CWL = 10 , Auto Self-Refresh - disable */ 1526 val = (cwl_mask_table[cwl_value] << 3); 1527 /* 1528 * nklein 24.10.13 - should not be here - leave value as set in 1529 * the init configuration val |= (1 << 9); 1530 * val |= ((tm->interface_params[if_id]. 1531 * interface_temp == HWS_TEMP_HIGH) ? (1 << 7) : 0); 1532 */ 1533 /* nklein 24.10.13 - see above comment */ 1534 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1535 if_id, MR2_REG, 1536 val, (0x7 << 3))); 1537 1538 /* ODT TIMING */ 1539 val = ((cl_value - cwl_value + 1) << 4) | 1540 ((cl_value - cwl_value + 6) << 8) | 1541 ((cl_value - 1) << 12) | ((cl_value + 6) << 16); 1542 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1543 if_id, ODT_TIMING_LOW, 1544 val, 0xffff0)); 1545 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1546 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1547 if_id, ODT_TIMING_HI_REG, 1548 val, 0xffff)); 1549 1550 /* ODT Active */ 1551 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1552 if_id, 1553 DUNIT_ODT_CONTROL_REG, 1554 0xf, 0xf)); 1555 1556 /* re-write CL */ 1557 val = ((cl_mask_table[cl_value] & 0x1) << 2) | 1558 ((cl_mask_table[cl_value] & 0xe) << 3); 1559 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1560 0, MR0_REG, val, 1561 (0x7 << 4) | (1 << 2))); 1562 1563 /* re-write CWL */ 1564 val = (cwl_mask_table[cwl_value] << 3); 1565 CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MRS2_CMD, 1566 val, (0x7 << 3))); 1567 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1568 0, MR2_REG, val, (0x7 << 3))); 1569 1570 if (mem_mask != 0) { 1571 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1572 if_id, 1573 CS_ENABLE_REG, 1574 1 << 3, 0x8)); 1575 } 1576 } 1577 1578 return MV_OK; 1579 } 1580 1581 /* 1582 * Set ODT values 1583 */ 1584 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 1585 u32 if_id, u32 cl_value, u32 cwl_value) 1586 { 1587 /* ODT TIMING */ 1588 u32 val = (cl_value - cwl_value + 6); 1589 1590 val = ((cl_value - cwl_value + 1) << 4) | ((val & 0xf) << 8) | 1591 (((cl_value - 1) & 0xf) << 12) | 1592 (((cl_value + 6) & 0xf) << 16) | (((val & 0x10) >> 4) << 21); 1593 val |= (((cl_value - 1) >> 4) << 22) | (((cl_value + 6) >> 4) << 23); 1594 1595 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1596 ODT_TIMING_LOW, val, 0xffff0)); 1597 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1598 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1599 ODT_TIMING_HI_REG, val, 0xffff)); 1600 if (odt_additional == 1) { 1601 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1602 if_id, 1603 SDRAM_ODT_CONTROL_HIGH_REG, 1604 0xf, 0xf)); 1605 } 1606 1607 /* ODT Active */ 1608 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1609 DUNIT_ODT_CONTROL_REG, 0xf, 0xf)); 1610 1611 return MV_OK; 1612 } 1613 1614 /* 1615 * Set Timing values for training 1616 */ 1617 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 1618 u32 if_id, enum hws_ddr_freq frequency) 1619 { 1620 u32 t_ckclk = 0, t_ras = 0; 1621 u32 t_rcd = 0, t_rp = 0, t_wr = 0, t_wtr = 0, t_rrd = 0, t_rtp = 0, 1622 t_rfc = 0, t_mod = 0; 1623 u32 val = 0, page_size = 0; 1624 enum hws_speed_bin speed_bin_index; 1625 enum hws_mem_size memory_size = MEM_2G; 1626 struct hws_topology_map *tm = ddr3_get_topology_map(); 1627 1628 speed_bin_index = tm->interface_params[if_id].speed_bin_index; 1629 memory_size = tm->interface_params[if_id].memory_size; 1630 page_size = 1631 (tm->interface_params[if_id].bus_width == 1632 BUS_WIDTH_8) ? page_param[memory_size]. 1633 page_size_8bit : page_param[memory_size].page_size_16bit; 1634 t_ckclk = (MEGA / freq_val[frequency]); 1635 t_rrd = (page_size == 1) ? speed_bin_table(speed_bin_index, 1636 SPEED_BIN_TRRD1K) : 1637 speed_bin_table(speed_bin_index, SPEED_BIN_TRRD2K); 1638 t_rrd = GET_MAX_VALUE(t_ckclk * 4, t_rrd); 1639 t_rtp = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index, 1640 SPEED_BIN_TRTP)); 1641 t_wtr = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index, 1642 SPEED_BIN_TWTR)); 1643 t_ras = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1644 SPEED_BIN_TRAS), 1645 t_ckclk); 1646 t_rcd = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1647 SPEED_BIN_TRCD), 1648 t_ckclk); 1649 t_rp = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1650 SPEED_BIN_TRP), 1651 t_ckclk); 1652 t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1653 SPEED_BIN_TWR), 1654 t_ckclk); 1655 t_wtr = TIME_2_CLOCK_CYCLES(t_wtr, t_ckclk); 1656 t_rrd = TIME_2_CLOCK_CYCLES(t_rrd, t_ckclk); 1657 t_rtp = TIME_2_CLOCK_CYCLES(t_rtp, t_ckclk); 1658 t_rfc = TIME_2_CLOCK_CYCLES(rfc_table[memory_size] * 1000, t_ckclk); 1659 t_mod = GET_MAX_VALUE(t_ckclk * 24, 15000); 1660 t_mod = TIME_2_CLOCK_CYCLES(t_mod, t_ckclk); 1661 1662 /* SDRAM Timing Low */ 1663 val = (t_ras & 0xf) | (t_rcd << 4) | (t_rp << 8) | (t_wr << 12) | 1664 (t_wtr << 16) | (((t_ras & 0x30) >> 4) << 20) | (t_rrd << 24) | 1665 (t_rtp << 28); 1666 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1667 SDRAM_TIMING_LOW_REG, val, 0xff3fffff)); 1668 1669 /* SDRAM Timing High */ 1670 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1671 SDRAM_TIMING_HIGH_REG, 1672 t_rfc & 0x7f, 0x7f)); 1673 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1674 SDRAM_TIMING_HIGH_REG, 1675 0x180, 0x180)); 1676 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1677 SDRAM_TIMING_HIGH_REG, 1678 0x600, 0x600)); 1679 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1680 SDRAM_TIMING_HIGH_REG, 1681 0x1800, 0xf800)); 1682 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1683 SDRAM_TIMING_HIGH_REG, 1684 ((t_rfc & 0x380) >> 7) << 16, 0x70000)); 1685 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1686 SDRAM_TIMING_HIGH_REG, 0, 1687 0x380000)); 1688 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1689 SDRAM_TIMING_HIGH_REG, 1690 (t_mod & 0xf) << 25, 0x1e00000)); 1691 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1692 SDRAM_TIMING_HIGH_REG, 1693 (t_mod >> 4) << 30, 0xc0000000)); 1694 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1695 SDRAM_TIMING_HIGH_REG, 1696 0x16000000, 0x1e000000)); 1697 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1698 SDRAM_TIMING_HIGH_REG, 1699 0x40000000, 0xc0000000)); 1700 1701 return MV_OK; 1702 } 1703 1704 /* 1705 * Mode Read 1706 */ 1707 int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info) 1708 { 1709 u32 ret; 1710 1711 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1712 MR0_REG, mode_info->reg_mr0, MASK_ALL_BITS); 1713 if (ret != MV_OK) 1714 return ret; 1715 1716 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1717 MR1_REG, mode_info->reg_mr1, MASK_ALL_BITS); 1718 if (ret != MV_OK) 1719 return ret; 1720 1721 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1722 MR2_REG, mode_info->reg_mr2, MASK_ALL_BITS); 1723 if (ret != MV_OK) 1724 return ret; 1725 1726 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1727 MR3_REG, mode_info->reg_mr2, MASK_ALL_BITS); 1728 if (ret != MV_OK) 1729 return ret; 1730 1731 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1732 READ_DATA_SAMPLE_DELAY, mode_info->read_data_sample, 1733 MASK_ALL_BITS); 1734 if (ret != MV_OK) 1735 return ret; 1736 1737 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1738 READ_DATA_READY_DELAY, mode_info->read_data_ready, 1739 MASK_ALL_BITS); 1740 if (ret != MV_OK) 1741 return ret; 1742 1743 return MV_OK; 1744 } 1745 1746 /* 1747 * Get first active IF 1748 */ 1749 int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask, 1750 u32 *interface_id) 1751 { 1752 u32 if_id; 1753 struct hws_topology_map *tm = ddr3_get_topology_map(); 1754 1755 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1756 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1757 if (interface_mask & (1 << if_id)) { 1758 *interface_id = if_id; 1759 break; 1760 } 1761 } 1762 1763 return MV_OK; 1764 } 1765 1766 /* 1767 * Write CS Result 1768 */ 1769 int ddr3_tip_write_cs_result(u32 dev_num, u32 offset) 1770 { 1771 u32 if_id, bus_num, cs_bitmask, data_val, cs_num; 1772 struct hws_topology_map *tm = ddr3_get_topology_map(); 1773 1774 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1775 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1776 for (bus_num = 0; bus_num < tm->num_of_bus_per_interface; 1777 bus_num++) { 1778 VALIDATE_ACTIVE(tm->bus_act_mask, bus_num); 1779 cs_bitmask = 1780 tm->interface_params[if_id]. 1781 as_bus_params[bus_num].cs_bitmask; 1782 if (cs_bitmask != effective_cs) { 1783 cs_num = GET_CS_FROM_MASK(cs_bitmask); 1784 ddr3_tip_bus_read(dev_num, if_id, 1785 ACCESS_TYPE_UNICAST, bus_num, 1786 DDR_PHY_DATA, 1787 offset + 1788 CS_REG_VALUE(effective_cs), 1789 &data_val); 1790 ddr3_tip_bus_write(dev_num, 1791 ACCESS_TYPE_UNICAST, 1792 if_id, 1793 ACCESS_TYPE_UNICAST, 1794 bus_num, DDR_PHY_DATA, 1795 offset + 1796 CS_REG_VALUE(cs_num), 1797 data_val); 1798 } 1799 } 1800 } 1801 1802 return MV_OK; 1803 } 1804 1805 /* 1806 * Write MRS 1807 */ 1808 int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd, 1809 u32 data, u32 mask) 1810 { 1811 u32 if_id, reg; 1812 struct hws_topology_map *tm = ddr3_get_topology_map(); 1813 1814 reg = (cmd == MRS1_CMD) ? MR1_REG : MR2_REG; 1815 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1816 PARAM_NOT_CARE, reg, data, mask)); 1817 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1818 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1819 CHECK_STATUS(ddr3_tip_if_write 1820 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1821 SDRAM_OPERATION_REG, 1822 (cs_mask_arr[if_id] << 8) | cmd, 0xf1f)); 1823 } 1824 1825 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1826 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1827 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 1828 0x1f, SDRAM_OPERATION_REG, 1829 MAX_POLLING_ITERATIONS) != MV_OK) { 1830 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1831 ("write_mrs_cmd: Poll cmd fail")); 1832 } 1833 } 1834 1835 return MV_OK; 1836 } 1837 1838 /* 1839 * Reset XSB Read FIFO 1840 */ 1841 int ddr3_tip_reset_fifo_ptr(u32 dev_num) 1842 { 1843 u32 if_id = 0; 1844 1845 /* Configure PHY reset value to 0 in order to "clean" the FIFO */ 1846 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1847 if_id, 0x15c8, 0, 0xff000000)); 1848 /* 1849 * Move PHY to RL mode (only in RL mode the PHY overrides FIFO values 1850 * during FIFO reset) 1851 */ 1852 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1853 if_id, TRAINING_SW_2_REG, 1854 0x1, 0x9)); 1855 /* In order that above configuration will influence the PHY */ 1856 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1857 if_id, 0x15b0, 1858 0x80000000, 0x80000000)); 1859 /* Reset read fifo assertion */ 1860 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1861 if_id, 0x1400, 0, 0x40000000)); 1862 /* Reset read fifo deassertion */ 1863 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1864 if_id, 0x1400, 1865 0x40000000, 0x40000000)); 1866 /* Move PHY back to functional mode */ 1867 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1868 if_id, TRAINING_SW_2_REG, 1869 0x8, 0x9)); 1870 /* Stop training machine */ 1871 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1872 if_id, 0x15b4, 0x10000, 0x10000)); 1873 1874 return MV_OK; 1875 } 1876 1877 /* 1878 * Reset Phy registers 1879 */ 1880 int ddr3_tip_ddr3_reset_phy_regs(u32 dev_num) 1881 { 1882 u32 if_id, phy_id, cs; 1883 struct hws_topology_map *tm = ddr3_get_topology_map(); 1884 1885 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1886 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1887 for (phy_id = 0; phy_id < tm->num_of_bus_per_interface; 1888 phy_id++) { 1889 VALIDATE_ACTIVE(tm->bus_act_mask, phy_id); 1890 CHECK_STATUS(ddr3_tip_bus_write 1891 (dev_num, ACCESS_TYPE_UNICAST, 1892 if_id, ACCESS_TYPE_UNICAST, 1893 phy_id, DDR_PHY_DATA, 1894 WL_PHY_REG + 1895 CS_REG_VALUE(effective_cs), 1896 phy_reg0_val)); 1897 CHECK_STATUS(ddr3_tip_bus_write 1898 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1899 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1900 RL_PHY_REG + CS_REG_VALUE(effective_cs), 1901 phy_reg2_val)); 1902 CHECK_STATUS(ddr3_tip_bus_write 1903 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1904 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1905 READ_CENTRALIZATION_PHY_REG + 1906 CS_REG_VALUE(effective_cs), phy_reg3_val)); 1907 CHECK_STATUS(ddr3_tip_bus_write 1908 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1909 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1910 WRITE_CENTRALIZATION_PHY_REG + 1911 CS_REG_VALUE(effective_cs), phy_reg3_val)); 1912 } 1913 } 1914 1915 /* Set Receiver Calibration value */ 1916 for (cs = 0; cs < MAX_CS_NUM; cs++) { 1917 /* PHY register 0xdb bits[5:0] - configure to 63 */ 1918 CHECK_STATUS(ddr3_tip_bus_write 1919 (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1920 ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1921 DDR_PHY_DATA, CSN_IOB_VREF_REG(cs), 63)); 1922 } 1923 1924 return MV_OK; 1925 } 1926 1927 /* 1928 * Restore Dunit registers 1929 */ 1930 int ddr3_tip_restore_dunit_regs(u32 dev_num) 1931 { 1932 u32 index_cnt; 1933 1934 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1935 PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG, 1936 0x1, 0x1)); 1937 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1938 PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG, 1939 calibration_update_control << 3, 1940 0x3 << 3)); 1941 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1942 PARAM_NOT_CARE, 1943 ODPG_WRITE_READ_MODE_ENABLE_REG, 1944 0xffff, MASK_ALL_BITS)); 1945 1946 for (index_cnt = 0; index_cnt < ARRAY_SIZE(odpg_default_value); 1947 index_cnt++) { 1948 CHECK_STATUS(ddr3_tip_if_write 1949 (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1950 odpg_default_value[index_cnt].reg_addr, 1951 odpg_default_value[index_cnt].reg_data, 1952 odpg_default_value[index_cnt].reg_mask)); 1953 } 1954 1955 return MV_OK; 1956 } 1957 1958 /* 1959 * Auto tune main flow 1960 */ 1961 static int ddr3_tip_ddr3_training_main_flow(u32 dev_num) 1962 { 1963 enum hws_ddr_freq freq = init_freq; 1964 struct init_cntr_param init_cntr_prm; 1965 int ret = MV_OK; 1966 u32 if_id; 1967 u32 max_cs = hws_ddr3_tip_max_cs_get(); 1968 struct hws_topology_map *tm = ddr3_get_topology_map(); 1969 1970 #ifndef EXCLUDE_SWITCH_DEBUG 1971 if (debug_training == DEBUG_LEVEL_TRACE) { 1972 CHECK_STATUS(print_device_info((u8)dev_num)); 1973 } 1974 #endif 1975 1976 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 1977 CHECK_STATUS(ddr3_tip_ddr3_reset_phy_regs(dev_num)); 1978 } 1979 /* Set to 0 after each loop to avoid illegal value may be used */ 1980 effective_cs = 0; 1981 1982 freq = init_freq; 1983 if (is_pll_before_init != 0) { 1984 for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { 1985 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1986 config_func_info[dev_num].tip_set_freq_divider_func( 1987 (u8)dev_num, if_id, freq); 1988 } 1989 } 1990 1991 if (is_adll_calib_before_init != 0) { 1992 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 1993 ("with adll calib before init\n")); 1994 adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq); 1995 } 1996 1997 if (is_reg_dump != 0) { 1998 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 1999 ("Dump before init controller\n")); 2000 ddr3_tip_reg_dump(dev_num); 2001 } 2002 2003 if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) { 2004 training_stage = INIT_CONTROLLER; 2005 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2006 ("INIT_CONTROLLER_MASK_BIT\n")); 2007 init_cntr_prm.do_mrs_phy = 1; 2008 init_cntr_prm.is_ctrl64_bit = 0; 2009 init_cntr_prm.init_phy = 1; 2010 init_cntr_prm.msys_init = 0; 2011 ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm); 2012 if (is_reg_dump != 0) 2013 ddr3_tip_reg_dump(dev_num); 2014 if (ret != MV_OK) { 2015 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2016 ("hws_ddr3_tip_init_controller failure\n")); 2017 if (debug_mode == 0) 2018 return MV_FAIL; 2019 } 2020 } 2021 2022 #ifdef STATIC_ALGO_SUPPORT 2023 if (mask_tune_func & STATIC_LEVELING_MASK_BIT) { 2024 training_stage = STATIC_LEVELING; 2025 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2026 ("STATIC_LEVELING_MASK_BIT\n")); 2027 ret = ddr3_tip_run_static_alg(dev_num, freq); 2028 if (is_reg_dump != 0) 2029 ddr3_tip_reg_dump(dev_num); 2030 if (ret != MV_OK) { 2031 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2032 ("ddr3_tip_run_static_alg failure\n")); 2033 if (debug_mode == 0) 2034 return MV_FAIL; 2035 } 2036 } 2037 #endif 2038 2039 if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) { 2040 training_stage = SET_LOW_FREQ; 2041 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2042 ("SET_LOW_FREQ_MASK_BIT %d\n", 2043 freq_val[low_freq])); 2044 ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2045 PARAM_NOT_CARE, low_freq); 2046 if (is_reg_dump != 0) 2047 ddr3_tip_reg_dump(dev_num); 2048 if (ret != MV_OK) { 2049 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2050 ("ddr3_tip_freq_set failure\n")); 2051 if (debug_mode == 0) 2052 return MV_FAIL; 2053 } 2054 } 2055 2056 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2057 if (mask_tune_func & LOAD_PATTERN_MASK_BIT) { 2058 training_stage = LOAD_PATTERN; 2059 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2060 ("LOAD_PATTERN_MASK_BIT #%d\n", 2061 effective_cs)); 2062 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2063 if (is_reg_dump != 0) 2064 ddr3_tip_reg_dump(dev_num); 2065 if (ret != MV_OK) { 2066 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2067 ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", 2068 effective_cs)); 2069 if (debug_mode == 0) 2070 return MV_FAIL; 2071 } 2072 } 2073 } 2074 /* Set to 0 after each loop to avoid illegal value may be used */ 2075 effective_cs = 0; 2076 2077 if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) { 2078 training_stage = SET_MEDIUM_FREQ; 2079 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2080 ("SET_MEDIUM_FREQ_MASK_BIT %d\n", 2081 freq_val[medium_freq])); 2082 ret = 2083 ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2084 PARAM_NOT_CARE, medium_freq); 2085 if (is_reg_dump != 0) 2086 ddr3_tip_reg_dump(dev_num); 2087 if (ret != MV_OK) { 2088 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2089 ("ddr3_tip_freq_set failure\n")); 2090 if (debug_mode == 0) 2091 return MV_FAIL; 2092 } 2093 } 2094 2095 if (mask_tune_func & WRITE_LEVELING_MASK_BIT) { 2096 training_stage = WRITE_LEVELING; 2097 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2098 ("WRITE_LEVELING_MASK_BIT\n")); 2099 if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) { 2100 ret = ddr3_tip_dynamic_write_leveling(dev_num); 2101 } else { 2102 /* Use old WL */ 2103 ret = ddr3_tip_legacy_dynamic_write_leveling(dev_num); 2104 } 2105 2106 if (is_reg_dump != 0) 2107 ddr3_tip_reg_dump(dev_num); 2108 if (ret != MV_OK) { 2109 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2110 ("ddr3_tip_dynamic_write_leveling failure\n")); 2111 if (debug_mode == 0) 2112 return MV_FAIL; 2113 } 2114 } 2115 2116 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2117 if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) { 2118 training_stage = LOAD_PATTERN_2; 2119 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2120 ("LOAD_PATTERN_2_MASK_BIT CS #%d\n", 2121 effective_cs)); 2122 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2123 if (is_reg_dump != 0) 2124 ddr3_tip_reg_dump(dev_num); 2125 if (ret != MV_OK) { 2126 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2127 ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", 2128 effective_cs)); 2129 if (debug_mode == 0) 2130 return MV_FAIL; 2131 } 2132 } 2133 } 2134 /* Set to 0 after each loop to avoid illegal value may be used */ 2135 effective_cs = 0; 2136 2137 if (mask_tune_func & READ_LEVELING_MASK_BIT) { 2138 training_stage = READ_LEVELING; 2139 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2140 ("READ_LEVELING_MASK_BIT\n")); 2141 if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) { 2142 ret = ddr3_tip_dynamic_read_leveling(dev_num, medium_freq); 2143 } else { 2144 /* Use old RL */ 2145 ret = ddr3_tip_legacy_dynamic_read_leveling(dev_num); 2146 } 2147 2148 if (is_reg_dump != 0) 2149 ddr3_tip_reg_dump(dev_num); 2150 if (ret != MV_OK) { 2151 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2152 ("ddr3_tip_dynamic_read_leveling failure\n")); 2153 if (debug_mode == 0) 2154 return MV_FAIL; 2155 } 2156 } 2157 2158 if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) { 2159 training_stage = WRITE_LEVELING_SUPP; 2160 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2161 ("WRITE_LEVELING_SUPP_MASK_BIT\n")); 2162 ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); 2163 if (is_reg_dump != 0) 2164 ddr3_tip_reg_dump(dev_num); 2165 if (ret != MV_OK) { 2166 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2167 ("ddr3_tip_dynamic_write_leveling_supp failure\n")); 2168 if (debug_mode == 0) 2169 return MV_FAIL; 2170 } 2171 } 2172 2173 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2174 if (mask_tune_func & PBS_RX_MASK_BIT) { 2175 training_stage = PBS_RX; 2176 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2177 ("PBS_RX_MASK_BIT CS #%d\n", 2178 effective_cs)); 2179 ret = ddr3_tip_pbs_rx(dev_num); 2180 if (is_reg_dump != 0) 2181 ddr3_tip_reg_dump(dev_num); 2182 if (ret != MV_OK) { 2183 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2184 ("ddr3_tip_pbs_rx failure CS #%d\n", 2185 effective_cs)); 2186 if (debug_mode == 0) 2187 return MV_FAIL; 2188 } 2189 } 2190 } 2191 2192 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2193 if (mask_tune_func & PBS_TX_MASK_BIT) { 2194 training_stage = PBS_TX; 2195 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2196 ("PBS_TX_MASK_BIT CS #%d\n", 2197 effective_cs)); 2198 ret = ddr3_tip_pbs_tx(dev_num); 2199 if (is_reg_dump != 0) 2200 ddr3_tip_reg_dump(dev_num); 2201 if (ret != MV_OK) { 2202 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2203 ("ddr3_tip_pbs_tx failure CS #%d\n", 2204 effective_cs)); 2205 if (debug_mode == 0) 2206 return MV_FAIL; 2207 } 2208 } 2209 } 2210 /* Set to 0 after each loop to avoid illegal value may be used */ 2211 effective_cs = 0; 2212 2213 if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) { 2214 training_stage = SET_TARGET_FREQ; 2215 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2216 ("SET_TARGET_FREQ_MASK_BIT %d\n", 2217 freq_val[tm-> 2218 interface_params[first_active_if]. 2219 memory_freq])); 2220 ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2221 PARAM_NOT_CARE, 2222 tm->interface_params[first_active_if]. 2223 memory_freq); 2224 if (is_reg_dump != 0) 2225 ddr3_tip_reg_dump(dev_num); 2226 if (ret != MV_OK) { 2227 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2228 ("ddr3_tip_freq_set failure\n")); 2229 if (debug_mode == 0) 2230 return MV_FAIL; 2231 } 2232 } 2233 2234 if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) { 2235 training_stage = WRITE_LEVELING_TF; 2236 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2237 ("WRITE_LEVELING_TF_MASK_BIT\n")); 2238 ret = ddr3_tip_dynamic_write_leveling(dev_num); 2239 if (is_reg_dump != 0) 2240 ddr3_tip_reg_dump(dev_num); 2241 if (ret != MV_OK) { 2242 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2243 ("ddr3_tip_dynamic_write_leveling TF failure\n")); 2244 if (debug_mode == 0) 2245 return MV_FAIL; 2246 } 2247 } 2248 2249 if (mask_tune_func & LOAD_PATTERN_HIGH_MASK_BIT) { 2250 training_stage = LOAD_PATTERN_HIGH; 2251 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("LOAD_PATTERN_HIGH\n")); 2252 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2253 if (is_reg_dump != 0) 2254 ddr3_tip_reg_dump(dev_num); 2255 if (ret != MV_OK) { 2256 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2257 ("ddr3_tip_load_all_pattern_to_mem failure\n")); 2258 if (debug_mode == 0) 2259 return MV_FAIL; 2260 } 2261 } 2262 2263 if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) { 2264 training_stage = READ_LEVELING_TF; 2265 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2266 ("READ_LEVELING_TF_MASK_BIT\n")); 2267 ret = ddr3_tip_dynamic_read_leveling(dev_num, tm-> 2268 interface_params[first_active_if]. 2269 memory_freq); 2270 if (is_reg_dump != 0) 2271 ddr3_tip_reg_dump(dev_num); 2272 if (ret != MV_OK) { 2273 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2274 ("ddr3_tip_dynamic_read_leveling TF failure\n")); 2275 if (debug_mode == 0) 2276 return MV_FAIL; 2277 } 2278 } 2279 2280 if (mask_tune_func & DM_PBS_TX_MASK_BIT) { 2281 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n")); 2282 } 2283 2284 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2285 if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) { 2286 training_stage = VREF_CALIBRATION; 2287 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("VREF\n")); 2288 ret = ddr3_tip_vref(dev_num); 2289 if (is_reg_dump != 0) { 2290 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2291 ("VREF Dump\n")); 2292 ddr3_tip_reg_dump(dev_num); 2293 } 2294 if (ret != MV_OK) { 2295 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2296 ("ddr3_tip_vref failure\n")); 2297 if (debug_mode == 0) 2298 return MV_FAIL; 2299 } 2300 } 2301 } 2302 /* Set to 0 after each loop to avoid illegal value may be used */ 2303 effective_cs = 0; 2304 2305 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2306 if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) { 2307 training_stage = CENTRALIZATION_RX; 2308 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2309 ("CENTRALIZATION_RX_MASK_BIT CS #%d\n", 2310 effective_cs)); 2311 ret = ddr3_tip_centralization_rx(dev_num); 2312 if (is_reg_dump != 0) 2313 ddr3_tip_reg_dump(dev_num); 2314 if (ret != MV_OK) { 2315 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2316 ("ddr3_tip_centralization_rx failure CS #%d\n", 2317 effective_cs)); 2318 if (debug_mode == 0) 2319 return MV_FAIL; 2320 } 2321 } 2322 } 2323 /* Set to 0 after each loop to avoid illegal value may be used */ 2324 effective_cs = 0; 2325 2326 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2327 if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) { 2328 training_stage = WRITE_LEVELING_SUPP_TF; 2329 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2330 ("WRITE_LEVELING_SUPP_TF_MASK_BIT CS #%d\n", 2331 effective_cs)); 2332 ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); 2333 if (is_reg_dump != 0) 2334 ddr3_tip_reg_dump(dev_num); 2335 if (ret != MV_OK) { 2336 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2337 ("ddr3_tip_dynamic_write_leveling_supp TF failure CS #%d\n", 2338 effective_cs)); 2339 if (debug_mode == 0) 2340 return MV_FAIL; 2341 } 2342 } 2343 } 2344 /* Set to 0 after each loop to avoid illegal value may be used */ 2345 effective_cs = 0; 2346 2347 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2348 if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) { 2349 training_stage = CENTRALIZATION_TX; 2350 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2351 ("CENTRALIZATION_TX_MASK_BIT CS #%d\n", 2352 effective_cs)); 2353 ret = ddr3_tip_centralization_tx(dev_num); 2354 if (is_reg_dump != 0) 2355 ddr3_tip_reg_dump(dev_num); 2356 if (ret != MV_OK) { 2357 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2358 ("ddr3_tip_centralization_tx failure CS #%d\n", 2359 effective_cs)); 2360 if (debug_mode == 0) 2361 return MV_FAIL; 2362 } 2363 } 2364 } 2365 /* Set to 0 after each loop to avoid illegal value may be used */ 2366 effective_cs = 0; 2367 2368 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n")); 2369 /* restore register values */ 2370 CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num)); 2371 2372 if (is_reg_dump != 0) 2373 ddr3_tip_reg_dump(dev_num); 2374 2375 return MV_OK; 2376 } 2377 2378 /* 2379 * DDR3 Dynamic training flow 2380 */ 2381 static int ddr3_tip_ddr3_auto_tune(u32 dev_num) 2382 { 2383 u32 if_id, stage, ret; 2384 int is_if_fail = 0, is_auto_tune_fail = 0; 2385 2386 training_stage = INIT_CONTROLLER; 2387 2388 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2389 for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) 2390 training_result[stage][if_id] = NO_TEST_DONE; 2391 } 2392 2393 ret = ddr3_tip_ddr3_training_main_flow(dev_num); 2394 2395 /* activate XSB test */ 2396 if (xsb_validate_type != 0) { 2397 run_xsb_test(dev_num, xsb_validation_base_address, 1, 1, 2398 0x1024); 2399 } 2400 2401 if (is_reg_dump != 0) 2402 ddr3_tip_reg_dump(dev_num); 2403 2404 /* print log */ 2405 CHECK_STATUS(ddr3_tip_print_log(dev_num, window_mem_addr)); 2406 2407 if (ret != MV_OK) { 2408 CHECK_STATUS(ddr3_tip_print_stability_log(dev_num)); 2409 } 2410 2411 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2412 is_if_fail = 0; 2413 for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) { 2414 if (training_result[stage][if_id] == TEST_FAILED) 2415 is_if_fail = 1; 2416 } 2417 if (is_if_fail == 1) { 2418 is_auto_tune_fail = 1; 2419 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2420 ("Auto Tune failed for IF %d\n", 2421 if_id)); 2422 } 2423 } 2424 2425 if ((ret == MV_FAIL) || (is_auto_tune_fail == 1)) 2426 return MV_FAIL; 2427 else 2428 return MV_OK; 2429 } 2430 2431 /* 2432 * Enable init sequence 2433 */ 2434 int ddr3_tip_enable_init_sequence(u32 dev_num) 2435 { 2436 int is_fail = 0; 2437 u32 if_id = 0, mem_mask = 0, bus_index = 0; 2438 struct hws_topology_map *tm = ddr3_get_topology_map(); 2439 2440 /* Enable init sequence */ 2441 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 0, 2442 SDRAM_INIT_CONTROL_REG, 0x1, 0x1)); 2443 2444 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2445 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 2446 2447 if (ddr3_tip_if_polling 2448 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1, 2449 SDRAM_INIT_CONTROL_REG, 2450 MAX_POLLING_ITERATIONS) != MV_OK) { 2451 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2452 ("polling failed IF %d\n", 2453 if_id)); 2454 is_fail = 1; 2455 continue; 2456 } 2457 2458 mem_mask = 0; 2459 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 2460 bus_index++) { 2461 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 2462 mem_mask |= 2463 tm->interface_params[if_id]. 2464 as_bus_params[bus_index].mirror_enable_bitmask; 2465 } 2466 2467 if (mem_mask != 0) { 2468 /* Disable Multi CS */ 2469 CHECK_STATUS(ddr3_tip_if_write 2470 (dev_num, ACCESS_TYPE_MULTICAST, 2471 if_id, CS_ENABLE_REG, 1 << 3, 2472 1 << 3)); 2473 } 2474 } 2475 2476 return (is_fail == 0) ? MV_OK : MV_FAIL; 2477 } 2478 2479 int ddr3_tip_register_dq_table(u32 dev_num, u32 *table) 2480 { 2481 dq_map_table = table; 2482 2483 return MV_OK; 2484 } 2485 2486 /* 2487 * Check if pup search is locked 2488 */ 2489 int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode) 2490 { 2491 u32 bit_start = 0, bit_end = 0, bit_id; 2492 2493 if (read_mode == RESULT_PER_BIT) { 2494 bit_start = 0; 2495 bit_end = BUS_WIDTH_IN_BITS - 1; 2496 } else { 2497 bit_start = 0; 2498 bit_end = 0; 2499 } 2500 2501 for (bit_id = bit_start; bit_id <= bit_end; bit_id++) { 2502 if (GET_LOCK_RESULT(pup_buf[bit_id]) == 0) 2503 return 0; 2504 } 2505 2506 return 1; 2507 } 2508 2509 /* 2510 * Get minimum buffer value 2511 */ 2512 u8 ddr3_tip_get_buf_min(u8 *buf_ptr) 2513 { 2514 u8 min_val = 0xff; 2515 u8 cnt = 0; 2516 2517 for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { 2518 if (buf_ptr[cnt] < min_val) 2519 min_val = buf_ptr[cnt]; 2520 } 2521 2522 return min_val; 2523 } 2524 2525 /* 2526 * Get maximum buffer value 2527 */ 2528 u8 ddr3_tip_get_buf_max(u8 *buf_ptr) 2529 { 2530 u8 max_val = 0; 2531 u8 cnt = 0; 2532 2533 for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { 2534 if (buf_ptr[cnt] > max_val) 2535 max_val = buf_ptr[cnt]; 2536 } 2537 2538 return max_val; 2539 } 2540 2541 /* 2542 * The following functions return memory parameters: 2543 * bus and device width, device size 2544 */ 2545 2546 u32 hws_ddr3_get_bus_width(void) 2547 { 2548 struct hws_topology_map *tm = ddr3_get_topology_map(); 2549 2550 return (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 2551 1) ? 16 : 32; 2552 } 2553 2554 u32 hws_ddr3_get_device_width(u32 if_id) 2555 { 2556 struct hws_topology_map *tm = ddr3_get_topology_map(); 2557 2558 return (tm->interface_params[if_id].bus_width == 2559 BUS_WIDTH_8) ? 8 : 16; 2560 } 2561 2562 u32 hws_ddr3_get_device_size(u32 if_id) 2563 { 2564 struct hws_topology_map *tm = ddr3_get_topology_map(); 2565 2566 if (tm->interface_params[if_id].memory_size >= 2567 MEM_SIZE_LAST) { 2568 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2569 ("Error: Wrong device size of Cs: %d", 2570 tm->interface_params[if_id].memory_size)); 2571 return 0; 2572 } else { 2573 return 1 << tm->interface_params[if_id].memory_size; 2574 } 2575 } 2576 2577 int hws_ddr3_calc_mem_cs_size(u32 if_id, u32 cs, u32 *cs_size) 2578 { 2579 u32 cs_mem_size, dev_size; 2580 2581 dev_size = hws_ddr3_get_device_size(if_id); 2582 if (dev_size != 0) { 2583 cs_mem_size = ((hws_ddr3_get_bus_width() / 2584 hws_ddr3_get_device_width(if_id)) * dev_size); 2585 2586 /* the calculated result in Gbytex16 to avoid float using */ 2587 2588 if (cs_mem_size == 2) { 2589 *cs_size = _128M; 2590 } else if (cs_mem_size == 4) { 2591 *cs_size = _256M; 2592 } else if (cs_mem_size == 8) { 2593 *cs_size = _512M; 2594 } else if (cs_mem_size == 16) { 2595 *cs_size = _1G; 2596 } else if (cs_mem_size == 32) { 2597 *cs_size = _2G; 2598 } else { 2599 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2600 ("Error: Wrong Memory size of Cs: %d", cs)); 2601 return MV_FAIL; 2602 } 2603 return MV_OK; 2604 } else { 2605 return MV_FAIL; 2606 } 2607 } 2608 2609 int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr) 2610 { 2611 u32 cs_mem_size = 0; 2612 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE 2613 u32 physical_mem_size; 2614 u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE; 2615 #endif 2616 2617 if (hws_ddr3_calc_mem_cs_size(if_id, cs, &cs_mem_size) != MV_OK) 2618 return MV_FAIL; 2619 2620 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE 2621 struct hws_topology_map *tm = ddr3_get_topology_map(); 2622 /* 2623 * if number of address pins doesn't allow to use max mem size that 2624 * is defined in topology mem size is defined by 2625 * DEVICE_MAX_DRAM_ADDRESS_SIZE 2626 */ 2627 physical_mem_size = 2628 mv_hwsmem_size[tm->interface_params[0].memory_size]; 2629 2630 if (hws_ddr3_get_device_width(cs) == 16) { 2631 /* 2632 * 16bit mem device can be twice more - no need in less 2633 * significant pin 2634 */ 2635 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2; 2636 } 2637 2638 if (physical_mem_size > max_mem_size) { 2639 cs_mem_size = max_mem_size * 2640 (hws_ddr3_get_bus_width() / 2641 hws_ddr3_get_device_width(if_id)); 2642 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2643 ("Updated Physical Mem size is from 0x%x to %x\n", 2644 physical_mem_size, 2645 DEVICE_MAX_DRAM_ADDRESS_SIZE)); 2646 } 2647 #endif 2648 2649 /* calculate CS base addr */ 2650 *cs_base_addr = ((cs_mem_size) * cs) & 0xffff0000; 2651 2652 return MV_OK; 2653 } 2654