1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Marvell International Ltd. and its affiliates 4 */ 5 6 #include <common.h> 7 #include <spl.h> 8 #include <asm/io.h> 9 #include <asm/arch/cpu.h> 10 #include <asm/arch/soc.h> 11 12 #include "ddr3_init.h" 13 14 #define GET_MAX_VALUE(x, y) \ 15 ((x) > (y)) ? (x) : (y) 16 #define CEIL_DIVIDE(x, y) \ 17 ((x - (x / y) * y) == 0) ? ((x / y) - 1) : (x / y) 18 19 #define TIME_2_CLOCK_CYCLES CEIL_DIVIDE 20 21 #define GET_CS_FROM_MASK(mask) (cs_mask2_num[mask]) 22 #define CS_CBE_VALUE(cs_num) (cs_cbe_reg[cs_num]) 23 24 #define TIMES_9_TREFI_CYCLES 0x8 25 26 u32 window_mem_addr = 0; 27 u32 phy_reg0_val = 0; 28 u32 phy_reg1_val = 8; 29 u32 phy_reg2_val = 0; 30 u32 phy_reg3_val = 0xa; 31 enum hws_ddr_freq init_freq = DDR_FREQ_667; 32 enum hws_ddr_freq low_freq = DDR_FREQ_LOW_FREQ; 33 enum hws_ddr_freq medium_freq; 34 u32 debug_dunit = 0; 35 u32 odt_additional = 1; 36 u32 *dq_map_table = NULL; 37 u32 odt_config = 1; 38 39 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ALLEYCAT3) || \ 40 defined(CONFIG_ARMADA_39X) 41 u32 is_pll_before_init = 0, is_adll_calib_before_init = 0, is_dfs_in_init = 0; 42 u32 dfs_low_freq = 130; 43 #else 44 u32 is_pll_before_init = 0, is_adll_calib_before_init = 1, is_dfs_in_init = 0; 45 u32 dfs_low_freq = 100; 46 #endif 47 u32 g_rtt_nom_c_s0, g_rtt_nom_c_s1; 48 u8 calibration_update_control; /* 2 external only, 1 is internal only */ 49 50 enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]; 51 enum auto_tune_stage training_stage = INIT_CONTROLLER; 52 u32 finger_test = 0, p_finger_start = 11, p_finger_end = 64, 53 n_finger_start = 11, n_finger_end = 64, 54 p_finger_step = 3, n_finger_step = 3; 55 u32 clamp_tbl[] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }; 56 57 /* Initiate to 0xff, this variable is define by user in debug mode */ 58 u32 mode2_t = 0xff; 59 u32 xsb_validate_type = 0; 60 u32 xsb_validation_base_address = 0xf000; 61 u32 first_active_if = 0; 62 u32 dfs_low_phy1 = 0x1f; 63 u32 multicast_id = 0; 64 int use_broadcast = 0; 65 struct hws_tip_freq_config_info *freq_info_table = NULL; 66 u8 is_cbe_required = 0; 67 u32 debug_mode = 0; 68 u32 delay_enable = 0; 69 int rl_mid_freq_wa = 0; 70 71 u32 effective_cs = 0; 72 73 u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT | 74 WRITE_LEVELING_MASK_BIT | 75 LOAD_PATTERN_2_MASK_BIT | 76 READ_LEVELING_MASK_BIT | 77 SET_TARGET_FREQ_MASK_BIT | WRITE_LEVELING_TF_MASK_BIT | 78 READ_LEVELING_TF_MASK_BIT | 79 CENTRALIZATION_RX_MASK_BIT | CENTRALIZATION_TX_MASK_BIT); 80 81 void ddr3_print_version(void) 82 { 83 printf(DDR3_TIP_VERSION_STRING); 84 } 85 86 static int ddr3_tip_ddr3_training_main_flow(u32 dev_num); 87 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 88 u32 if_id, u32 cl_value, u32 cwl_value); 89 static int ddr3_tip_ddr3_auto_tune(u32 dev_num); 90 static int is_bus_access_done(u32 dev_num, u32 if_id, 91 u32 dunit_reg_adrr, u32 bit); 92 #ifdef ODT_TEST_SUPPORT 93 static int odt_test(u32 dev_num, enum hws_algo_type algo_type); 94 #endif 95 96 int adll_calibration(u32 dev_num, enum hws_access_type access_type, 97 u32 if_id, enum hws_ddr_freq frequency); 98 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 99 u32 if_id, enum hws_ddr_freq frequency); 100 101 static struct page_element page_param[] = { 102 /* 103 * 8bits 16 bits 104 * page-size(K) page-size(K) mask 105 */ 106 { 1, 2, 2}, 107 /* 512M */ 108 { 1, 2, 3}, 109 /* 1G */ 110 { 1, 2, 0}, 111 /* 2G */ 112 { 1, 2, 4}, 113 /* 4G */ 114 { 2, 2, 5} 115 /* 8G */ 116 }; 117 118 static u8 mem_size_config[MEM_SIZE_LAST] = { 119 0x2, /* 512Mbit */ 120 0x3, /* 1Gbit */ 121 0x0, /* 2Gbit */ 122 0x4, /* 4Gbit */ 123 0x5 /* 8Gbit */ 124 }; 125 126 static u8 cs_mask2_num[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 }; 127 128 static struct reg_data odpg_default_value[] = { 129 {0x1034, 0x38000, MASK_ALL_BITS}, 130 {0x1038, 0x0, MASK_ALL_BITS}, 131 {0x10b0, 0x0, MASK_ALL_BITS}, 132 {0x10b8, 0x0, MASK_ALL_BITS}, 133 {0x10c0, 0x0, MASK_ALL_BITS}, 134 {0x10f0, 0x0, MASK_ALL_BITS}, 135 {0x10f4, 0x0, MASK_ALL_BITS}, 136 {0x10f8, 0xff, MASK_ALL_BITS}, 137 {0x10fc, 0xffff, MASK_ALL_BITS}, 138 {0x1130, 0x0, MASK_ALL_BITS}, 139 {0x1830, 0x2000000, MASK_ALL_BITS}, 140 {0x14d0, 0x0, MASK_ALL_BITS}, 141 {0x14d4, 0x0, MASK_ALL_BITS}, 142 {0x14d8, 0x0, MASK_ALL_BITS}, 143 {0x14dc, 0x0, MASK_ALL_BITS}, 144 {0x1454, 0x0, MASK_ALL_BITS}, 145 {0x1594, 0x0, MASK_ALL_BITS}, 146 {0x1598, 0x0, MASK_ALL_BITS}, 147 {0x159c, 0x0, MASK_ALL_BITS}, 148 {0x15a0, 0x0, MASK_ALL_BITS}, 149 {0x15a4, 0x0, MASK_ALL_BITS}, 150 {0x15a8, 0x0, MASK_ALL_BITS}, 151 {0x15ac, 0x0, MASK_ALL_BITS}, 152 {0x1604, 0x0, MASK_ALL_BITS}, 153 {0x1608, 0x0, MASK_ALL_BITS}, 154 {0x160c, 0x0, MASK_ALL_BITS}, 155 {0x1610, 0x0, MASK_ALL_BITS}, 156 {0x1614, 0x0, MASK_ALL_BITS}, 157 {0x1618, 0x0, MASK_ALL_BITS}, 158 {0x1624, 0x0, MASK_ALL_BITS}, 159 {0x1690, 0x0, MASK_ALL_BITS}, 160 {0x1694, 0x0, MASK_ALL_BITS}, 161 {0x1698, 0x0, MASK_ALL_BITS}, 162 {0x169c, 0x0, MASK_ALL_BITS}, 163 {0x14b8, 0x6f67, MASK_ALL_BITS}, 164 {0x1630, 0x0, MASK_ALL_BITS}, 165 {0x1634, 0x0, MASK_ALL_BITS}, 166 {0x1638, 0x0, MASK_ALL_BITS}, 167 {0x163c, 0x0, MASK_ALL_BITS}, 168 {0x16b0, 0x0, MASK_ALL_BITS}, 169 {0x16b4, 0x0, MASK_ALL_BITS}, 170 {0x16b8, 0x0, MASK_ALL_BITS}, 171 {0x16bc, 0x0, MASK_ALL_BITS}, 172 {0x16c0, 0x0, MASK_ALL_BITS}, 173 {0x16c4, 0x0, MASK_ALL_BITS}, 174 {0x16c8, 0x0, MASK_ALL_BITS}, 175 {0x16cc, 0x1, MASK_ALL_BITS}, 176 {0x16f0, 0x1, MASK_ALL_BITS}, 177 {0x16f4, 0x0, MASK_ALL_BITS}, 178 {0x16f8, 0x0, MASK_ALL_BITS}, 179 {0x16fc, 0x0, MASK_ALL_BITS} 180 }; 181 182 static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access, 183 u32 if_id, enum hws_access_type phy_access, 184 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 185 u32 data_value, enum hws_operation oper_type); 186 static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id); 187 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id); 188 189 /* 190 * Update global training parameters by data from user 191 */ 192 int ddr3_tip_tune_training_params(u32 dev_num, 193 struct tune_train_params *params) 194 { 195 if (params->ck_delay != -1) 196 ck_delay = params->ck_delay; 197 if (params->ck_delay_16 != -1) 198 ck_delay_16 = params->ck_delay_16; 199 if (params->phy_reg3_val != -1) 200 phy_reg3_val = params->phy_reg3_val; 201 202 return MV_OK; 203 } 204 205 /* 206 * Configure CS 207 */ 208 int ddr3_tip_configure_cs(u32 dev_num, u32 if_id, u32 cs_num, u32 enable) 209 { 210 u32 data, addr_hi, data_high; 211 u32 mem_index; 212 struct hws_topology_map *tm = ddr3_get_topology_map(); 213 214 if (enable == 1) { 215 data = (tm->interface_params[if_id].bus_width == 216 BUS_WIDTH_8) ? 0 : 1; 217 CHECK_STATUS(ddr3_tip_if_write 218 (dev_num, ACCESS_TYPE_UNICAST, if_id, 219 SDRAM_ACCESS_CONTROL_REG, (data << (cs_num * 4)), 220 0x3 << (cs_num * 4))); 221 mem_index = tm->interface_params[if_id].memory_size; 222 223 addr_hi = mem_size_config[mem_index] & 0x3; 224 CHECK_STATUS(ddr3_tip_if_write 225 (dev_num, ACCESS_TYPE_UNICAST, if_id, 226 SDRAM_ACCESS_CONTROL_REG, 227 (addr_hi << (2 + cs_num * 4)), 228 0x3 << (2 + cs_num * 4))); 229 230 data_high = (mem_size_config[mem_index] & 0x4) >> 2; 231 CHECK_STATUS(ddr3_tip_if_write 232 (dev_num, ACCESS_TYPE_UNICAST, if_id, 233 SDRAM_ACCESS_CONTROL_REG, 234 data_high << (20 + cs_num), 1 << (20 + cs_num))); 235 236 /* Enable Address Select Mode */ 237 CHECK_STATUS(ddr3_tip_if_write 238 (dev_num, ACCESS_TYPE_UNICAST, if_id, 239 SDRAM_ACCESS_CONTROL_REG, 1 << (16 + cs_num), 240 1 << (16 + cs_num))); 241 } 242 switch (cs_num) { 243 case 0: 244 case 1: 245 case 2: 246 CHECK_STATUS(ddr3_tip_if_write 247 (dev_num, ACCESS_TYPE_UNICAST, if_id, 248 DDR_CONTROL_LOW_REG, (enable << (cs_num + 11)), 249 1 << (cs_num + 11))); 250 break; 251 case 3: 252 CHECK_STATUS(ddr3_tip_if_write 253 (dev_num, ACCESS_TYPE_UNICAST, if_id, 254 DDR_CONTROL_LOW_REG, (enable << 15), 1 << 15)); 255 break; 256 } 257 258 return MV_OK; 259 } 260 261 /* 262 * Calculate number of CS 263 */ 264 static int calc_cs_num(u32 dev_num, u32 if_id, u32 *cs_num) 265 { 266 u32 cs; 267 u32 bus_cnt; 268 u32 cs_count; 269 u32 cs_bitmask; 270 u32 curr_cs_num = 0; 271 struct hws_topology_map *tm = ddr3_get_topology_map(); 272 273 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 274 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 275 cs_count = 0; 276 cs_bitmask = tm->interface_params[if_id]. 277 as_bus_params[bus_cnt].cs_bitmask; 278 for (cs = 0; cs < MAX_CS_NUM; cs++) { 279 if ((cs_bitmask >> cs) & 1) 280 cs_count++; 281 } 282 283 if (curr_cs_num == 0) { 284 curr_cs_num = cs_count; 285 } else if (cs_count != curr_cs_num) { 286 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 287 ("CS number is different per bus (IF %d BUS %d cs_num %d curr_cs_num %d)\n", 288 if_id, bus_cnt, cs_count, 289 curr_cs_num)); 290 return MV_NOT_SUPPORTED; 291 } 292 } 293 *cs_num = curr_cs_num; 294 295 return MV_OK; 296 } 297 298 /* 299 * Init Controller Flow 300 */ 301 int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_prm) 302 { 303 u32 if_id; 304 u32 cs_num; 305 u32 t_refi = 0, t_hclk = 0, t_ckclk = 0, t_faw = 0, t_pd = 0, 306 t_wr = 0, t2t = 0, txpdll = 0; 307 u32 data_value = 0, bus_width = 0, page_size = 0, cs_cnt = 0, 308 mem_mask = 0, bus_index = 0; 309 enum hws_speed_bin speed_bin_index = SPEED_BIN_DDR_2133N; 310 enum hws_mem_size memory_size = MEM_2G; 311 enum hws_ddr_freq freq = init_freq; 312 enum hws_timing timing; 313 u32 cs_mask = 0; 314 u32 cl_value = 0, cwl_val = 0; 315 u32 refresh_interval_cnt = 0, bus_cnt = 0, adll_tap = 0; 316 enum hws_access_type access_type = ACCESS_TYPE_UNICAST; 317 u32 data_read[MAX_INTERFACE_NUM]; 318 struct hws_topology_map *tm = ddr3_get_topology_map(); 319 u32 odt_config = g_odt_config_2cs; 320 321 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 322 ("Init_controller, do_mrs_phy=%d, is_ctrl64_bit=%d\n", 323 init_cntr_prm->do_mrs_phy, 324 init_cntr_prm->is_ctrl64_bit)); 325 326 if (init_cntr_prm->init_phy == 1) { 327 CHECK_STATUS(ddr3_tip_configure_phy(dev_num)); 328 } 329 330 if (generic_init_controller == 1) { 331 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 332 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 333 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 334 ("active IF %d\n", if_id)); 335 mem_mask = 0; 336 for (bus_index = 0; 337 bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 338 bus_index++) { 339 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 340 mem_mask |= 341 tm->interface_params[if_id]. 342 as_bus_params[bus_index].mirror_enable_bitmask; 343 } 344 345 if (mem_mask != 0) { 346 CHECK_STATUS(ddr3_tip_if_write 347 (dev_num, ACCESS_TYPE_MULTICAST, 348 if_id, CS_ENABLE_REG, 0, 349 0x8)); 350 } 351 352 memory_size = 353 tm->interface_params[if_id]. 354 memory_size; 355 speed_bin_index = 356 tm->interface_params[if_id]. 357 speed_bin_index; 358 freq = init_freq; 359 t_refi = 360 (tm->interface_params[if_id]. 361 interface_temp == 362 HWS_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW; 363 t_refi *= 1000; /* psec */ 364 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 365 ("memy_size %d speed_bin_ind %d freq %d t_refi %d\n", 366 memory_size, speed_bin_index, freq, 367 t_refi)); 368 /* HCLK & CK CLK in 2:1[ps] */ 369 /* t_ckclk is external clock */ 370 t_ckclk = (MEGA / freq_val[freq]); 371 /* t_hclk is internal clock */ 372 t_hclk = 2 * t_ckclk; 373 refresh_interval_cnt = t_refi / t_hclk; /* no units */ 374 bus_width = 375 (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) 376 == 1) ? (16) : (32); 377 378 if (init_cntr_prm->is_ctrl64_bit) 379 bus_width = 64; 380 381 data_value = 382 (refresh_interval_cnt | 0x4000 | 383 ((bus_width == 384 32) ? 0x8000 : 0) | 0x1000000) & ~(1 << 26); 385 386 /* Interface Bus Width */ 387 /* SRMode */ 388 CHECK_STATUS(ddr3_tip_if_write 389 (dev_num, access_type, if_id, 390 SDRAM_CONFIGURATION_REG, data_value, 391 0x100ffff)); 392 393 /* Interleave first command pre-charge enable (TBD) */ 394 CHECK_STATUS(ddr3_tip_if_write 395 (dev_num, access_type, if_id, 396 SDRAM_OPEN_PAGE_CONTROL_REG, (1 << 10), 397 (1 << 10))); 398 399 /* PHY configuration */ 400 /* 401 * Postamble Length = 1.5cc, Addresscntl to clk skew 402 * \BD, Preamble length normal, parralal ADLL enable 403 */ 404 CHECK_STATUS(ddr3_tip_if_write 405 (dev_num, access_type, if_id, 406 DRAM_PHY_CONFIGURATION, 0x28, 0x3e)); 407 if (init_cntr_prm->is_ctrl64_bit) { 408 /* positive edge */ 409 CHECK_STATUS(ddr3_tip_if_write 410 (dev_num, access_type, if_id, 411 DRAM_PHY_CONFIGURATION, 0x0, 412 0xff80)); 413 } 414 415 /* calibration block disable */ 416 /* Xbar Read buffer select (for Internal access) */ 417 CHECK_STATUS(ddr3_tip_if_write 418 (dev_num, access_type, if_id, 419 CALIB_MACHINE_CTRL_REG, 0x1200c, 420 0x7dffe01c)); 421 CHECK_STATUS(ddr3_tip_if_write 422 (dev_num, access_type, if_id, 423 CALIB_MACHINE_CTRL_REG, 424 calibration_update_control << 3, 0x3 << 3)); 425 426 /* Pad calibration control - enable */ 427 CHECK_STATUS(ddr3_tip_if_write 428 (dev_num, access_type, if_id, 429 CALIB_MACHINE_CTRL_REG, 0x1, 0x1)); 430 431 cs_mask = 0; 432 data_value = 0x7; 433 /* 434 * Address ctrl \96 Part of the Generic code 435 * The next configuration is done: 436 * 1) Memory Size 437 * 2) Bus_width 438 * 3) CS# 439 * 4) Page Number 440 * 5) t_faw 441 * Per Dunit get from the Map_topology the parameters: 442 * Bus_width 443 * t_faw is per Dunit not per CS 444 */ 445 page_size = 446 (tm->interface_params[if_id]. 447 bus_width == 448 BUS_WIDTH_8) ? page_param[memory_size]. 449 page_size_8bit : page_param[memory_size]. 450 page_size_16bit; 451 452 t_faw = 453 (page_size == 1) ? speed_bin_table(speed_bin_index, 454 SPEED_BIN_TFAW1K) 455 : speed_bin_table(speed_bin_index, 456 SPEED_BIN_TFAW2K); 457 458 data_value = TIME_2_CLOCK_CYCLES(t_faw, t_ckclk); 459 data_value = data_value << 24; 460 CHECK_STATUS(ddr3_tip_if_write 461 (dev_num, access_type, if_id, 462 SDRAM_ACCESS_CONTROL_REG, data_value, 463 0x7f000000)); 464 465 data_value = 466 (tm->interface_params[if_id]. 467 bus_width == BUS_WIDTH_8) ? 0 : 1; 468 469 /* create merge cs mask for all cs available in dunit */ 470 for (bus_cnt = 0; 471 bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); 472 bus_cnt++) { 473 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 474 cs_mask |= 475 tm->interface_params[if_id]. 476 as_bus_params[bus_cnt].cs_bitmask; 477 } 478 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 479 ("Init_controller IF %d cs_mask %d\n", 480 if_id, cs_mask)); 481 /* 482 * Configure the next upon the Map Topology \96 If the 483 * Dunit is CS0 Configure CS0 if it is multi CS 484 * configure them both: The Bust_width it\92s the 485 * Memory Bus width \96 x8 or x16 486 */ 487 for (cs_cnt = 0; cs_cnt < NUM_OF_CS; cs_cnt++) { 488 ddr3_tip_configure_cs(dev_num, if_id, cs_cnt, 489 ((cs_mask & (1 << cs_cnt)) ? 1 490 : 0)); 491 } 492 493 if (init_cntr_prm->do_mrs_phy) { 494 /* 495 * MR0 \96 Part of the Generic code 496 * The next configuration is done: 497 * 1) Burst Length 498 * 2) CAS Latency 499 * get for each dunit what is it Speed_bin & 500 * Target Frequency. From those both parameters 501 * get the appropriate Cas_l from the CL table 502 */ 503 cl_value = 504 tm->interface_params[if_id]. 505 cas_l; 506 cwl_val = 507 tm->interface_params[if_id]. 508 cas_wl; 509 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 510 ("cl_value 0x%x cwl_val 0x%x\n", 511 cl_value, cwl_val)); 512 t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 513 SPEED_BIN_TWR), 514 t_ckclk); 515 data_value = 516 ((cl_mask_table[cl_value] & 0x1) << 2) | 517 ((cl_mask_table[cl_value] & 0xe) << 3); 518 CHECK_STATUS(ddr3_tip_if_write 519 (dev_num, access_type, if_id, 520 MR0_REG, data_value, 521 (0x7 << 4) | (1 << 2))); 522 CHECK_STATUS(ddr3_tip_if_write 523 (dev_num, access_type, if_id, 524 MR0_REG, twr_mask_table[t_wr + 1] << 9, 525 (0x7 << 9))); 526 527 528 /* 529 * MR1: Set RTT and DIC Design GL values 530 * configured by user 531 */ 532 CHECK_STATUS(ddr3_tip_if_write 533 (dev_num, ACCESS_TYPE_MULTICAST, 534 PARAM_NOT_CARE, MR1_REG, 535 g_dic | g_rtt_nom, 0x266)); 536 537 /* MR2 - Part of the Generic code */ 538 /* 539 * The next configuration is done: 540 * 1) SRT 541 * 2) CAS Write Latency 542 */ 543 data_value = (cwl_mask_table[cwl_val] << 3); 544 data_value |= 545 ((tm->interface_params[if_id]. 546 interface_temp == 547 HWS_TEMP_HIGH) ? (1 << 7) : 0); 548 CHECK_STATUS(ddr3_tip_if_write 549 (dev_num, access_type, if_id, 550 MR2_REG, data_value, 551 (0x7 << 3) | (0x1 << 7) | (0x3 << 552 9))); 553 } 554 555 ddr3_tip_write_odt(dev_num, access_type, if_id, 556 cl_value, cwl_val); 557 ddr3_tip_set_timing(dev_num, access_type, if_id, freq); 558 559 CHECK_STATUS(ddr3_tip_if_write 560 (dev_num, access_type, if_id, 561 DUNIT_CONTROL_HIGH_REG, 0x177, 562 0x1000177)); 563 564 if (init_cntr_prm->is_ctrl64_bit) { 565 /* disable 0.25 cc delay */ 566 CHECK_STATUS(ddr3_tip_if_write 567 (dev_num, access_type, if_id, 568 DUNIT_CONTROL_HIGH_REG, 0x0, 569 0x800)); 570 } 571 572 /* reset bit 7 */ 573 CHECK_STATUS(ddr3_tip_if_write 574 (dev_num, access_type, if_id, 575 DUNIT_CONTROL_HIGH_REG, 576 (init_cntr_prm->msys_init << 7), (1 << 7))); 577 578 /* calculate number of CS (per interface) */ 579 CHECK_STATUS(calc_cs_num 580 (dev_num, if_id, &cs_num)); 581 timing = tm->interface_params[if_id].timing; 582 583 if (mode2_t != 0xff) { 584 t2t = mode2_t; 585 } else if (timing != HWS_TIM_DEFAULT) { 586 /* Board topology map is forcing timing */ 587 t2t = (timing == HWS_TIM_2T) ? 1 : 0; 588 } else { 589 t2t = (cs_num == 1) ? 0 : 1; 590 } 591 592 CHECK_STATUS(ddr3_tip_if_write 593 (dev_num, access_type, if_id, 594 DDR_CONTROL_LOW_REG, t2t << 3, 595 0x3 << 3)); 596 /* move the block to ddr3_tip_set_timing - start */ 597 t_pd = TIMES_9_TREFI_CYCLES; 598 txpdll = GET_MAX_VALUE(t_ckclk * 10, 599 speed_bin_table(speed_bin_index, 600 SPEED_BIN_TXPDLL)); 601 txpdll = CEIL_DIVIDE((txpdll - 1), t_ckclk); 602 CHECK_STATUS(ddr3_tip_if_write 603 (dev_num, access_type, if_id, 604 DDR_TIMING_REG, txpdll << 4 | t_pd, 605 0x1f << 4 | 0xf)); 606 CHECK_STATUS(ddr3_tip_if_write 607 (dev_num, access_type, if_id, 608 DDR_TIMING_REG, 0x28 << 9, 0x3f << 9)); 609 CHECK_STATUS(ddr3_tip_if_write 610 (dev_num, access_type, if_id, 611 DDR_TIMING_REG, 0xa << 21, 0xff << 21)); 612 613 /* move the block to ddr3_tip_set_timing - end */ 614 /* AUTO_ZQC_TIMING */ 615 CHECK_STATUS(ddr3_tip_if_write 616 (dev_num, access_type, if_id, 617 TIMING_REG, (AUTO_ZQC_TIMING | (2 << 20)), 618 0x3fffff)); 619 CHECK_STATUS(ddr3_tip_if_read 620 (dev_num, access_type, if_id, 621 DRAM_PHY_CONFIGURATION, data_read, 0x30)); 622 data_value = 623 (data_read[if_id] == 0) ? (1 << 11) : 0; 624 CHECK_STATUS(ddr3_tip_if_write 625 (dev_num, access_type, if_id, 626 DUNIT_CONTROL_HIGH_REG, data_value, 627 (1 << 11))); 628 629 /* Set Active control for ODT write transactions */ 630 if (cs_num == 1) 631 odt_config = g_odt_config_1cs; 632 CHECK_STATUS(ddr3_tip_if_write 633 (dev_num, ACCESS_TYPE_MULTICAST, 634 PARAM_NOT_CARE, 0x1494, odt_config, 635 MASK_ALL_BITS)); 636 } 637 } else { 638 #ifdef STATIC_ALGO_SUPPORT 639 CHECK_STATUS(ddr3_tip_static_init_controller(dev_num)); 640 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X) 641 CHECK_STATUS(ddr3_tip_static_phy_init_controller(dev_num)); 642 #endif 643 #endif /* STATIC_ALGO_SUPPORT */ 644 } 645 646 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 647 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 648 CHECK_STATUS(ddr3_tip_rank_control(dev_num, if_id)); 649 650 if (init_cntr_prm->do_mrs_phy) { 651 CHECK_STATUS(ddr3_tip_pad_inv(dev_num, if_id)); 652 } 653 654 /* Pad calibration control - disable */ 655 CHECK_STATUS(ddr3_tip_if_write 656 (dev_num, access_type, if_id, 657 CALIB_MACHINE_CTRL_REG, 0x0, 0x1)); 658 CHECK_STATUS(ddr3_tip_if_write 659 (dev_num, access_type, if_id, 660 CALIB_MACHINE_CTRL_REG, 661 calibration_update_control << 3, 0x3 << 3)); 662 } 663 664 CHECK_STATUS(ddr3_tip_enable_init_sequence(dev_num)); 665 666 if (delay_enable != 0) { 667 adll_tap = MEGA / (freq_val[freq] * 64); 668 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 669 } 670 671 return MV_OK; 672 } 673 674 /* 675 * Load Topology map 676 */ 677 int hws_ddr3_tip_load_topology_map(u32 dev_num, struct hws_topology_map *tm) 678 { 679 enum hws_speed_bin speed_bin_index; 680 enum hws_ddr_freq freq = DDR_FREQ_LIMIT; 681 u32 if_id; 682 683 freq_val[DDR_FREQ_LOW_FREQ] = dfs_low_freq; 684 tm = ddr3_get_topology_map(); 685 CHECK_STATUS(ddr3_tip_get_first_active_if 686 ((u8)dev_num, tm->if_act_mask, 687 &first_active_if)); 688 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 689 ("board IF_Mask=0x%x num_of_bus_per_interface=0x%x\n", 690 tm->if_act_mask, 691 tm->num_of_bus_per_interface)); 692 693 /* 694 * if CL, CWL values are missing in topology map, then fill them 695 * according to speedbin tables 696 */ 697 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 698 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 699 speed_bin_index = 700 tm->interface_params[if_id].speed_bin_index; 701 /* TBD memory frequency of interface 0 only is used ! */ 702 freq = tm->interface_params[first_active_if].memory_freq; 703 704 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 705 ("speed_bin_index =%d freq=%d cl=%d cwl=%d\n", 706 speed_bin_index, freq_val[freq], 707 tm->interface_params[if_id]. 708 cas_l, 709 tm->interface_params[if_id]. 710 cas_wl)); 711 712 if (tm->interface_params[if_id].cas_l == 0) { 713 tm->interface_params[if_id].cas_l = 714 cas_latency_table[speed_bin_index].cl_val[freq]; 715 } 716 717 if (tm->interface_params[if_id].cas_wl == 0) { 718 tm->interface_params[if_id].cas_wl = 719 cas_write_latency_table[speed_bin_index].cl_val[freq]; 720 } 721 } 722 723 return MV_OK; 724 } 725 726 /* 727 * RANK Control Flow 728 */ 729 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id) 730 { 731 u32 data_value = 0, bus_cnt; 732 struct hws_topology_map *tm = ddr3_get_topology_map(); 733 734 for (bus_cnt = 1; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 735 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 736 if ((tm->interface_params[if_id]. 737 as_bus_params[0].cs_bitmask != 738 tm->interface_params[if_id]. 739 as_bus_params[bus_cnt].cs_bitmask) || 740 (tm->interface_params[if_id]. 741 as_bus_params[0].mirror_enable_bitmask != 742 tm->interface_params[if_id]. 743 as_bus_params[bus_cnt].mirror_enable_bitmask)) 744 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 745 ("WARNING:Wrong configuration for pup #%d CS mask and CS mirroring for all pups should be the same\n", 746 bus_cnt)); 747 } 748 749 data_value |= tm->interface_params[if_id]. 750 as_bus_params[0].cs_bitmask; 751 data_value |= tm->interface_params[if_id]. 752 as_bus_params[0].mirror_enable_bitmask << 4; 753 754 CHECK_STATUS(ddr3_tip_if_write 755 (dev_num, ACCESS_TYPE_UNICAST, if_id, RANK_CTRL_REG, 756 data_value, 0xff)); 757 758 return MV_OK; 759 } 760 761 /* 762 * PAD Inverse Flow 763 */ 764 static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id) 765 { 766 u32 bus_cnt, data_value, ck_swap_pup_ctrl; 767 struct hws_topology_map *tm = ddr3_get_topology_map(); 768 769 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 770 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 771 if (tm->interface_params[if_id]. 772 as_bus_params[bus_cnt].is_dqs_swap == 1) { 773 /* dqs swap */ 774 ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST, 775 if_id, bus_cnt, 776 DDR_PHY_DATA, 777 PHY_CONTROL_PHY_REG, 0xc0, 778 0xc0); 779 } 780 781 if (tm->interface_params[if_id]. 782 as_bus_params[bus_cnt].is_ck_swap == 1) { 783 if (bus_cnt <= 1) 784 data_value = 0x5 << 2; 785 else 786 data_value = 0xa << 2; 787 788 /* mask equals data */ 789 /* ck swap pup is only control pup #0 ! */ 790 ck_swap_pup_ctrl = 0; 791 ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST, 792 if_id, ck_swap_pup_ctrl, 793 DDR_PHY_CONTROL, 794 PHY_CONTROL_PHY_REG, 795 data_value, data_value); 796 } 797 } 798 799 return MV_OK; 800 } 801 802 /* 803 * Run Training Flow 804 */ 805 int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type) 806 { 807 int ret = MV_OK, ret_tune = MV_OK; 808 809 #ifdef ODT_TEST_SUPPORT 810 if (finger_test == 1) 811 return odt_test(dev_num, algo_type); 812 #endif 813 814 if (algo_type == ALGO_TYPE_DYNAMIC) { 815 ret = ddr3_tip_ddr3_auto_tune(dev_num); 816 } else { 817 #ifdef STATIC_ALGO_SUPPORT 818 { 819 enum hws_ddr_freq freq; 820 freq = init_freq; 821 822 /* add to mask */ 823 if (is_adll_calib_before_init != 0) { 824 printf("with adll calib before init\n"); 825 adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 826 0, freq); 827 } 828 /* 829 * Frequency per interface is not relevant, 830 * only interface 0 831 */ 832 ret = ddr3_tip_run_static_alg(dev_num, 833 freq); 834 } 835 #endif 836 } 837 838 if (ret != MV_OK) { 839 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 840 ("Run_alg: tuning failed %d\n", ret_tune)); 841 } 842 843 return ret; 844 } 845 846 #ifdef ODT_TEST_SUPPORT 847 /* 848 * ODT Test 849 */ 850 static int odt_test(u32 dev_num, enum hws_algo_type algo_type) 851 { 852 int ret = MV_OK, ret_tune = MV_OK; 853 int pfinger_val = 0, nfinger_val; 854 855 for (pfinger_val = p_finger_start; pfinger_val <= p_finger_end; 856 pfinger_val += p_finger_step) { 857 for (nfinger_val = n_finger_start; nfinger_val <= n_finger_end; 858 nfinger_val += n_finger_step) { 859 if (finger_test != 0) { 860 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 861 ("pfinger_val %d nfinger_val %d\n", 862 pfinger_val, nfinger_val)); 863 p_finger = pfinger_val; 864 n_finger = nfinger_val; 865 } 866 867 if (algo_type == ALGO_TYPE_DYNAMIC) { 868 ret = ddr3_tip_ddr3_auto_tune(dev_num); 869 } else { 870 /* 871 * Frequency per interface is not relevant, 872 * only interface 0 873 */ 874 ret = ddr3_tip_run_static_alg(dev_num, 875 init_freq); 876 } 877 } 878 } 879 880 if (ret_tune != MV_OK) { 881 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 882 ("Run_alg: tuning failed %d\n", ret_tune)); 883 ret = (ret == MV_OK) ? ret_tune : ret; 884 } 885 886 return ret; 887 } 888 #endif 889 890 /* 891 * Select Controller 892 */ 893 int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable) 894 { 895 if (config_func_info[dev_num].tip_dunit_mux_select_func != NULL) { 896 return config_func_info[dev_num]. 897 tip_dunit_mux_select_func((u8)dev_num, enable); 898 } 899 900 return MV_FAIL; 901 } 902 903 /* 904 * Dunit Register Write 905 */ 906 int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access, 907 u32 if_id, u32 reg_addr, u32 data_value, u32 mask) 908 { 909 if (config_func_info[dev_num].tip_dunit_write_func != NULL) { 910 return config_func_info[dev_num]. 911 tip_dunit_write_func((u8)dev_num, interface_access, 912 if_id, reg_addr, 913 data_value, mask); 914 } 915 916 return MV_FAIL; 917 } 918 919 /* 920 * Dunit Register Read 921 */ 922 int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access, 923 u32 if_id, u32 reg_addr, u32 *data, u32 mask) 924 { 925 if (config_func_info[dev_num].tip_dunit_read_func != NULL) { 926 return config_func_info[dev_num]. 927 tip_dunit_read_func((u8)dev_num, interface_access, 928 if_id, reg_addr, 929 data, mask); 930 } 931 932 return MV_FAIL; 933 } 934 935 /* 936 * Dunit Register Polling 937 */ 938 int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type, 939 u32 if_id, u32 exp_value, u32 mask, u32 offset, 940 u32 poll_tries) 941 { 942 u32 poll_cnt = 0, interface_num = 0, start_if, end_if; 943 u32 read_data[MAX_INTERFACE_NUM]; 944 int ret; 945 int is_fail = 0, is_if_fail; 946 struct hws_topology_map *tm = ddr3_get_topology_map(); 947 948 if (access_type == ACCESS_TYPE_MULTICAST) { 949 start_if = 0; 950 end_if = MAX_INTERFACE_NUM - 1; 951 } else { 952 start_if = if_id; 953 end_if = if_id; 954 } 955 956 for (interface_num = start_if; interface_num <= end_if; interface_num++) { 957 /* polling bit 3 for n times */ 958 VALIDATE_ACTIVE(tm->if_act_mask, interface_num); 959 960 is_if_fail = 0; 961 for (poll_cnt = 0; poll_cnt < poll_tries; poll_cnt++) { 962 ret = 963 ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, 964 interface_num, offset, read_data, 965 mask); 966 if (ret != MV_OK) 967 return ret; 968 969 if (read_data[interface_num] == exp_value) 970 break; 971 } 972 973 if (poll_cnt >= poll_tries) { 974 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 975 ("max poll IF #%d\n", interface_num)); 976 is_fail = 1; 977 is_if_fail = 1; 978 } 979 980 training_result[training_stage][interface_num] = 981 (is_if_fail == 1) ? TEST_FAILED : TEST_SUCCESS; 982 } 983 984 return (is_fail == 0) ? MV_OK : MV_FAIL; 985 } 986 987 /* 988 * Bus read access 989 */ 990 int ddr3_tip_bus_read(u32 dev_num, u32 if_id, 991 enum hws_access_type phy_access, u32 phy_id, 992 enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data) 993 { 994 u32 bus_index = 0; 995 u32 data_read[MAX_INTERFACE_NUM]; 996 struct hws_topology_map *tm = ddr3_get_topology_map(); 997 998 if (phy_access == ACCESS_TYPE_MULTICAST) { 999 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 1000 bus_index++) { 1001 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 1002 CHECK_STATUS(ddr3_tip_bus_access 1003 (dev_num, ACCESS_TYPE_UNICAST, 1004 if_id, ACCESS_TYPE_UNICAST, 1005 bus_index, phy_type, reg_addr, 0, 1006 OPERATION_READ)); 1007 CHECK_STATUS(ddr3_tip_if_read 1008 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1009 PHY_REG_FILE_ACCESS, data_read, 1010 MASK_ALL_BITS)); 1011 data[bus_index] = (data_read[if_id] & 0xffff); 1012 } 1013 } else { 1014 CHECK_STATUS(ddr3_tip_bus_access 1015 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1016 phy_access, phy_id, phy_type, reg_addr, 0, 1017 OPERATION_READ)); 1018 CHECK_STATUS(ddr3_tip_if_read 1019 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1020 PHY_REG_FILE_ACCESS, data_read, MASK_ALL_BITS)); 1021 1022 /* 1023 * only 16 lsb bit are valid in Phy (each register is different, 1024 * some can actually be less than 16 bits) 1025 */ 1026 *data = (data_read[if_id] & 0xffff); 1027 } 1028 1029 return MV_OK; 1030 } 1031 1032 /* 1033 * Bus write access 1034 */ 1035 int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type interface_access, 1036 u32 if_id, enum hws_access_type phy_access, 1037 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1038 u32 data_value) 1039 { 1040 CHECK_STATUS(ddr3_tip_bus_access 1041 (dev_num, interface_access, if_id, phy_access, 1042 phy_id, phy_type, reg_addr, data_value, OPERATION_WRITE)); 1043 1044 return MV_OK; 1045 } 1046 1047 /* 1048 * Bus access routine (relevant for both read & write) 1049 */ 1050 static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access, 1051 u32 if_id, enum hws_access_type phy_access, 1052 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1053 u32 data_value, enum hws_operation oper_type) 1054 { 1055 u32 addr_low = 0x3f & reg_addr; 1056 u32 addr_hi = ((0xc0 & reg_addr) >> 6); 1057 u32 data_p1 = 1058 (oper_type << 30) + (addr_hi << 28) + (phy_access << 27) + 1059 (phy_type << 26) + (phy_id << 22) + (addr_low << 16) + 1060 (data_value & 0xffff); 1061 u32 data_p2 = data_p1 + (1 << 31); 1062 u32 start_if, end_if; 1063 struct hws_topology_map *tm = ddr3_get_topology_map(); 1064 1065 CHECK_STATUS(ddr3_tip_if_write 1066 (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS, 1067 data_p1, MASK_ALL_BITS)); 1068 CHECK_STATUS(ddr3_tip_if_write 1069 (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS, 1070 data_p2, MASK_ALL_BITS)); 1071 1072 if (interface_access == ACCESS_TYPE_UNICAST) { 1073 start_if = if_id; 1074 end_if = if_id; 1075 } else { 1076 start_if = 0; 1077 end_if = MAX_INTERFACE_NUM - 1; 1078 } 1079 1080 /* polling for read/write execution done */ 1081 for (if_id = start_if; if_id <= end_if; if_id++) { 1082 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1083 CHECK_STATUS(is_bus_access_done 1084 (dev_num, if_id, PHY_REG_FILE_ACCESS, 31)); 1085 } 1086 1087 return MV_OK; 1088 } 1089 1090 /* 1091 * Check bus access done 1092 */ 1093 static int is_bus_access_done(u32 dev_num, u32 if_id, u32 dunit_reg_adrr, 1094 u32 bit) 1095 { 1096 u32 rd_data = 1; 1097 u32 cnt = 0; 1098 u32 data_read[MAX_INTERFACE_NUM]; 1099 1100 CHECK_STATUS(ddr3_tip_if_read 1101 (dev_num, ACCESS_TYPE_UNICAST, if_id, dunit_reg_adrr, 1102 data_read, MASK_ALL_BITS)); 1103 rd_data = data_read[if_id]; 1104 rd_data &= (1 << bit); 1105 1106 while (rd_data != 0) { 1107 if (cnt++ >= MAX_POLLING_ITERATIONS) 1108 break; 1109 1110 CHECK_STATUS(ddr3_tip_if_read 1111 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1112 dunit_reg_adrr, data_read, MASK_ALL_BITS)); 1113 rd_data = data_read[if_id]; 1114 rd_data &= (1 << bit); 1115 } 1116 1117 if (cnt < MAX_POLLING_ITERATIONS) 1118 return MV_OK; 1119 else 1120 return MV_FAIL; 1121 } 1122 1123 /* 1124 * Phy read-modify-write 1125 */ 1126 int ddr3_tip_bus_read_modify_write(u32 dev_num, enum hws_access_type access_type, 1127 u32 interface_id, u32 phy_id, 1128 enum hws_ddr_phy phy_type, u32 reg_addr, 1129 u32 data_value, u32 reg_mask) 1130 { 1131 u32 data_val = 0, if_id, start_if, end_if; 1132 struct hws_topology_map *tm = ddr3_get_topology_map(); 1133 1134 if (access_type == ACCESS_TYPE_MULTICAST) { 1135 start_if = 0; 1136 end_if = MAX_INTERFACE_NUM - 1; 1137 } else { 1138 start_if = interface_id; 1139 end_if = interface_id; 1140 } 1141 1142 for (if_id = start_if; if_id <= end_if; if_id++) { 1143 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1144 CHECK_STATUS(ddr3_tip_bus_read 1145 (dev_num, if_id, ACCESS_TYPE_UNICAST, phy_id, 1146 phy_type, reg_addr, &data_val)); 1147 data_value = (data_val & (~reg_mask)) | (data_value & reg_mask); 1148 CHECK_STATUS(ddr3_tip_bus_write 1149 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1150 ACCESS_TYPE_UNICAST, phy_id, phy_type, reg_addr, 1151 data_value)); 1152 } 1153 1154 return MV_OK; 1155 } 1156 1157 /* 1158 * ADLL Calibration 1159 */ 1160 int adll_calibration(u32 dev_num, enum hws_access_type access_type, 1161 u32 if_id, enum hws_ddr_freq frequency) 1162 { 1163 struct hws_tip_freq_config_info freq_config_info; 1164 u32 bus_cnt = 0; 1165 struct hws_topology_map *tm = ddr3_get_topology_map(); 1166 1167 /* Reset Diver_b assert -> de-assert */ 1168 CHECK_STATUS(ddr3_tip_if_write 1169 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1170 0, 0x10000000)); 1171 mdelay(10); 1172 CHECK_STATUS(ddr3_tip_if_write 1173 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1174 0x10000000, 0x10000000)); 1175 1176 if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) { 1177 CHECK_STATUS(config_func_info[dev_num]. 1178 tip_get_freq_config_info_func((u8)dev_num, frequency, 1179 &freq_config_info)); 1180 } else { 1181 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1182 ("tip_get_freq_config_info_func is NULL")); 1183 return MV_NOT_INITIALIZED; 1184 } 1185 1186 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 1187 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 1188 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1189 (dev_num, access_type, if_id, bus_cnt, 1190 DDR_PHY_DATA, BW_PHY_REG, 1191 freq_config_info.bw_per_freq << 8, 0x700)); 1192 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1193 (dev_num, access_type, if_id, bus_cnt, 1194 DDR_PHY_DATA, RATE_PHY_REG, 1195 freq_config_info.rate_per_freq, 0x7)); 1196 } 1197 1198 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1199 CHECK_STATUS(ddr3_tip_if_write 1200 (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION, 1201 0, (0x80000000 | 0x40000000))); 1202 mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ])); 1203 CHECK_STATUS(ddr3_tip_if_write 1204 (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION, 1205 (0x80000000 | 0x40000000), (0x80000000 | 0x40000000))); 1206 1207 /* polling for ADLL Done */ 1208 if (ddr3_tip_if_polling(dev_num, access_type, if_id, 1209 0x3ff03ff, 0x3ff03ff, PHY_LOCK_STATUS_REG, 1210 MAX_POLLING_ITERATIONS) != MV_OK) { 1211 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1212 ("Freq_set: DDR3 poll failed(1)")); 1213 } 1214 1215 /* pup data_pup reset assert-> deassert */ 1216 CHECK_STATUS(ddr3_tip_if_write 1217 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1218 0, 0x60000000)); 1219 mdelay(10); 1220 CHECK_STATUS(ddr3_tip_if_write 1221 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1222 0x60000000, 0x60000000)); 1223 1224 return MV_OK; 1225 } 1226 1227 int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type, 1228 u32 if_id, enum hws_ddr_freq frequency) 1229 { 1230 u32 cl_value = 0, cwl_value = 0, mem_mask = 0, val = 0, 1231 bus_cnt = 0, t_hclk = 0, t_wr = 0, 1232 refresh_interval_cnt = 0, cnt_id; 1233 u32 t_ckclk; 1234 u32 t_refi = 0, end_if, start_if; 1235 u32 bus_index = 0; 1236 int is_dll_off = 0; 1237 enum hws_speed_bin speed_bin_index = 0; 1238 struct hws_tip_freq_config_info freq_config_info; 1239 enum hws_result *flow_result = training_result[training_stage]; 1240 u32 adll_tap = 0; 1241 u32 cs_mask[MAX_INTERFACE_NUM]; 1242 struct hws_topology_map *tm = ddr3_get_topology_map(); 1243 1244 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1245 ("dev %d access %d IF %d freq %d\n", dev_num, 1246 access_type, if_id, frequency)); 1247 1248 if (frequency == DDR_FREQ_LOW_FREQ) 1249 is_dll_off = 1; 1250 if (access_type == ACCESS_TYPE_MULTICAST) { 1251 start_if = 0; 1252 end_if = MAX_INTERFACE_NUM - 1; 1253 } else { 1254 start_if = if_id; 1255 end_if = if_id; 1256 } 1257 1258 /* calculate interface cs mask - Oferb 4/11 */ 1259 /* speed bin can be different for each interface */ 1260 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1261 /* cs enable is active low */ 1262 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1263 cs_mask[if_id] = CS_BIT_MASK; 1264 training_result[training_stage][if_id] = TEST_SUCCESS; 1265 ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs, 1266 &cs_mask[if_id]); 1267 } 1268 1269 /* speed bin can be different for each interface */ 1270 /* 1271 * moti b - need to remove the loop for multicas access functions 1272 * and loop the unicast access functions 1273 */ 1274 for (if_id = start_if; if_id <= end_if; if_id++) { 1275 if (IS_ACTIVE(tm->if_act_mask, if_id) == 0) 1276 continue; 1277 1278 flow_result[if_id] = TEST_SUCCESS; 1279 speed_bin_index = 1280 tm->interface_params[if_id].speed_bin_index; 1281 if (tm->interface_params[if_id].memory_freq == 1282 frequency) { 1283 cl_value = 1284 tm->interface_params[if_id].cas_l; 1285 cwl_value = 1286 tm->interface_params[if_id].cas_wl; 1287 } else { 1288 cl_value = 1289 cas_latency_table[speed_bin_index].cl_val[frequency]; 1290 cwl_value = 1291 cas_write_latency_table[speed_bin_index]. 1292 cl_val[frequency]; 1293 } 1294 1295 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1296 ("Freq_set dev 0x%x access 0x%x if 0x%x freq 0x%x speed %d:\n\t", 1297 dev_num, access_type, if_id, 1298 frequency, speed_bin_index)); 1299 1300 for (cnt_id = 0; cnt_id < DDR_FREQ_LIMIT; cnt_id++) { 1301 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1302 ("%d ", 1303 cas_latency_table[speed_bin_index]. 1304 cl_val[cnt_id])); 1305 } 1306 1307 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("\n")); 1308 mem_mask = 0; 1309 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 1310 bus_index++) { 1311 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 1312 mem_mask |= 1313 tm->interface_params[if_id]. 1314 as_bus_params[bus_index].mirror_enable_bitmask; 1315 } 1316 1317 if (mem_mask != 0) { 1318 /* motib redundant in KW28 */ 1319 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1320 if_id, 1321 CS_ENABLE_REG, 0, 0x8)); 1322 } 1323 1324 /* dll state after exiting SR */ 1325 if (is_dll_off == 1) { 1326 CHECK_STATUS(ddr3_tip_if_write 1327 (dev_num, access_type, if_id, 1328 DFS_REG, 0x1, 0x1)); 1329 } else { 1330 CHECK_STATUS(ddr3_tip_if_write 1331 (dev_num, access_type, if_id, 1332 DFS_REG, 0, 0x1)); 1333 } 1334 1335 CHECK_STATUS(ddr3_tip_if_write 1336 (dev_num, access_type, if_id, 1337 DUNIT_MMASK_REG, 0, 0x1)); 1338 /* DFS - block transactions */ 1339 CHECK_STATUS(ddr3_tip_if_write 1340 (dev_num, access_type, if_id, 1341 DFS_REG, 0x2, 0x2)); 1342 1343 /* disable ODT in case of dll off */ 1344 if (is_dll_off == 1) { 1345 CHECK_STATUS(ddr3_tip_if_write 1346 (dev_num, access_type, if_id, 1347 0x1874, 0, 0x244)); 1348 CHECK_STATUS(ddr3_tip_if_write 1349 (dev_num, access_type, if_id, 1350 0x1884, 0, 0x244)); 1351 CHECK_STATUS(ddr3_tip_if_write 1352 (dev_num, access_type, if_id, 1353 0x1894, 0, 0x244)); 1354 CHECK_STATUS(ddr3_tip_if_write 1355 (dev_num, access_type, if_id, 1356 0x18a4, 0, 0x244)); 1357 } 1358 1359 /* DFS - Enter Self-Refresh */ 1360 CHECK_STATUS(ddr3_tip_if_write 1361 (dev_num, access_type, if_id, DFS_REG, 0x4, 1362 0x4)); 1363 /* polling on self refresh entry */ 1364 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, 1365 if_id, 0x8, 0x8, DFS_REG, 1366 MAX_POLLING_ITERATIONS) != MV_OK) { 1367 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1368 ("Freq_set: DDR3 poll failed on SR entry\n")); 1369 } 1370 1371 /* PLL configuration */ 1372 if (config_func_info[dev_num].tip_set_freq_divider_func != NULL) { 1373 config_func_info[dev_num]. 1374 tip_set_freq_divider_func(dev_num, if_id, 1375 frequency); 1376 } 1377 1378 /* PLL configuration End */ 1379 1380 /* adjust t_refi to new frequency */ 1381 t_refi = (tm->interface_params[if_id].interface_temp == 1382 HWS_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW; 1383 t_refi *= 1000; /*psec */ 1384 1385 /* HCLK in[ps] */ 1386 t_hclk = MEGA / (freq_val[frequency] / 2); 1387 refresh_interval_cnt = t_refi / t_hclk; /* no units */ 1388 val = 0x4000 | refresh_interval_cnt; 1389 CHECK_STATUS(ddr3_tip_if_write 1390 (dev_num, access_type, if_id, 1391 SDRAM_CONFIGURATION_REG, val, 0x7fff)); 1392 1393 /* DFS - CL/CWL/WR parameters after exiting SR */ 1394 CHECK_STATUS(ddr3_tip_if_write 1395 (dev_num, access_type, if_id, DFS_REG, 1396 (cl_mask_table[cl_value] << 8), 0xf00)); 1397 CHECK_STATUS(ddr3_tip_if_write 1398 (dev_num, access_type, if_id, DFS_REG, 1399 (cwl_mask_table[cwl_value] << 12), 0x7000)); 1400 1401 t_ckclk = MEGA / freq_val[frequency]; 1402 t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1403 SPEED_BIN_TWR), 1404 t_ckclk); 1405 1406 CHECK_STATUS(ddr3_tip_if_write 1407 (dev_num, access_type, if_id, DFS_REG, 1408 (twr_mask_table[t_wr + 1] << 16), 0x70000)); 1409 1410 /* Restore original RTT values if returning from DLL OFF mode */ 1411 if (is_dll_off == 1) { 1412 CHECK_STATUS(ddr3_tip_if_write 1413 (dev_num, access_type, if_id, 0x1874, 1414 g_dic | g_rtt_nom, 0x266)); 1415 CHECK_STATUS(ddr3_tip_if_write 1416 (dev_num, access_type, if_id, 0x1884, 1417 g_dic | g_rtt_nom, 0x266)); 1418 CHECK_STATUS(ddr3_tip_if_write 1419 (dev_num, access_type, if_id, 0x1894, 1420 g_dic | g_rtt_nom, 0x266)); 1421 CHECK_STATUS(ddr3_tip_if_write 1422 (dev_num, access_type, if_id, 0x18a4, 1423 g_dic | g_rtt_nom, 0x266)); 1424 } 1425 1426 /* Reset Diver_b assert -> de-assert */ 1427 CHECK_STATUS(ddr3_tip_if_write 1428 (dev_num, access_type, if_id, 1429 SDRAM_CONFIGURATION_REG, 0, 0x10000000)); 1430 mdelay(10); 1431 CHECK_STATUS(ddr3_tip_if_write 1432 (dev_num, access_type, if_id, 1433 SDRAM_CONFIGURATION_REG, 0x10000000, 0x10000000)); 1434 1435 /* Adll configuration function of process and Frequency */ 1436 if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) { 1437 CHECK_STATUS(config_func_info[dev_num]. 1438 tip_get_freq_config_info_func(dev_num, frequency, 1439 &freq_config_info)); 1440 } 1441 /* TBD check milo5 using device ID ? */ 1442 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); 1443 bus_cnt++) { 1444 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 1445 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1446 (dev_num, ACCESS_TYPE_UNICAST, 1447 if_id, bus_cnt, DDR_PHY_DATA, 1448 0x92, 1449 freq_config_info. 1450 bw_per_freq << 8 1451 /*freq_mask[dev_num][frequency] << 8 */ 1452 , 0x700)); 1453 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1454 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1455 bus_cnt, DDR_PHY_DATA, 0x94, 1456 freq_config_info.rate_per_freq, 0x7)); 1457 } 1458 1459 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1460 CHECK_STATUS(ddr3_tip_if_write 1461 (dev_num, access_type, if_id, 1462 DRAM_PHY_CONFIGURATION, 0, 1463 (0x80000000 | 0x40000000))); 1464 mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ])); 1465 CHECK_STATUS(ddr3_tip_if_write 1466 (dev_num, access_type, if_id, 1467 DRAM_PHY_CONFIGURATION, (0x80000000 | 0x40000000), 1468 (0x80000000 | 0x40000000))); 1469 1470 /* polling for ADLL Done */ 1471 if (ddr3_tip_if_polling 1472 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3ff03ff, 1473 0x3ff03ff, PHY_LOCK_STATUS_REG, 1474 MAX_POLLING_ITERATIONS) != MV_OK) { 1475 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1476 ("Freq_set: DDR3 poll failed(1)\n")); 1477 } 1478 1479 /* pup data_pup reset assert-> deassert */ 1480 CHECK_STATUS(ddr3_tip_if_write 1481 (dev_num, access_type, if_id, 1482 SDRAM_CONFIGURATION_REG, 0, 0x60000000)); 1483 mdelay(10); 1484 CHECK_STATUS(ddr3_tip_if_write 1485 (dev_num, access_type, if_id, 1486 SDRAM_CONFIGURATION_REG, 0x60000000, 0x60000000)); 1487 1488 /* Set proper timing params before existing Self-Refresh */ 1489 ddr3_tip_set_timing(dev_num, access_type, if_id, frequency); 1490 if (delay_enable != 0) { 1491 adll_tap = MEGA / (freq_val[frequency] * 64); 1492 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 1493 } 1494 1495 /* Exit SR */ 1496 CHECK_STATUS(ddr3_tip_if_write 1497 (dev_num, access_type, if_id, DFS_REG, 0, 1498 0x4)); 1499 if (ddr3_tip_if_polling 1500 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x8, DFS_REG, 1501 MAX_POLLING_ITERATIONS) != MV_OK) { 1502 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1503 ("Freq_set: DDR3 poll failed(2)")); 1504 } 1505 1506 /* Refresh Command */ 1507 CHECK_STATUS(ddr3_tip_if_write 1508 (dev_num, access_type, if_id, 1509 SDRAM_OPERATION_REG, 0x2, 0xf1f)); 1510 if (ddr3_tip_if_polling 1511 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, 1512 SDRAM_OPERATION_REG, MAX_POLLING_ITERATIONS) != MV_OK) { 1513 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1514 ("Freq_set: DDR3 poll failed(3)")); 1515 } 1516 1517 /* Release DFS Block */ 1518 CHECK_STATUS(ddr3_tip_if_write 1519 (dev_num, access_type, if_id, DFS_REG, 0, 1520 0x2)); 1521 /* Controller to MBUS Retry - normal */ 1522 CHECK_STATUS(ddr3_tip_if_write 1523 (dev_num, access_type, if_id, DUNIT_MMASK_REG, 1524 0x1, 0x1)); 1525 1526 /* MRO: Burst Length 8, CL , Auto_precharge 0x16cc */ 1527 val = 1528 ((cl_mask_table[cl_value] & 0x1) << 2) | 1529 ((cl_mask_table[cl_value] & 0xe) << 3); 1530 CHECK_STATUS(ddr3_tip_if_write 1531 (dev_num, access_type, if_id, MR0_REG, 1532 val, (0x7 << 4) | (1 << 2))); 1533 /* MR2: CWL = 10 , Auto Self-Refresh - disable */ 1534 val = (cwl_mask_table[cwl_value] << 3); 1535 /* 1536 * nklein 24.10.13 - should not be here - leave value as set in 1537 * the init configuration val |= (1 << 9); 1538 * val |= ((tm->interface_params[if_id]. 1539 * interface_temp == HWS_TEMP_HIGH) ? (1 << 7) : 0); 1540 */ 1541 /* nklein 24.10.13 - see above comment */ 1542 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1543 if_id, MR2_REG, 1544 val, (0x7 << 3))); 1545 1546 /* ODT TIMING */ 1547 val = ((cl_value - cwl_value + 1) << 4) | 1548 ((cl_value - cwl_value + 6) << 8) | 1549 ((cl_value - 1) << 12) | ((cl_value + 6) << 16); 1550 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1551 if_id, ODT_TIMING_LOW, 1552 val, 0xffff0)); 1553 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1554 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1555 if_id, ODT_TIMING_HI_REG, 1556 val, 0xffff)); 1557 1558 /* ODT Active */ 1559 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1560 if_id, 1561 DUNIT_ODT_CONTROL_REG, 1562 0xf, 0xf)); 1563 1564 /* re-write CL */ 1565 val = ((cl_mask_table[cl_value] & 0x1) << 2) | 1566 ((cl_mask_table[cl_value] & 0xe) << 3); 1567 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1568 0, MR0_REG, val, 1569 (0x7 << 4) | (1 << 2))); 1570 1571 /* re-write CWL */ 1572 val = (cwl_mask_table[cwl_value] << 3); 1573 CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MRS2_CMD, 1574 val, (0x7 << 3))); 1575 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1576 0, MR2_REG, val, (0x7 << 3))); 1577 1578 if (mem_mask != 0) { 1579 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1580 if_id, 1581 CS_ENABLE_REG, 1582 1 << 3, 0x8)); 1583 } 1584 } 1585 1586 return MV_OK; 1587 } 1588 1589 /* 1590 * Set ODT values 1591 */ 1592 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 1593 u32 if_id, u32 cl_value, u32 cwl_value) 1594 { 1595 /* ODT TIMING */ 1596 u32 val = (cl_value - cwl_value + 6); 1597 1598 val = ((cl_value - cwl_value + 1) << 4) | ((val & 0xf) << 8) | 1599 (((cl_value - 1) & 0xf) << 12) | 1600 (((cl_value + 6) & 0xf) << 16) | (((val & 0x10) >> 4) << 21); 1601 val |= (((cl_value - 1) >> 4) << 22) | (((cl_value + 6) >> 4) << 23); 1602 1603 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1604 ODT_TIMING_LOW, val, 0xffff0)); 1605 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1606 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1607 ODT_TIMING_HI_REG, val, 0xffff)); 1608 if (odt_additional == 1) { 1609 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1610 if_id, 1611 SDRAM_ODT_CONTROL_HIGH_REG, 1612 0xf, 0xf)); 1613 } 1614 1615 /* ODT Active */ 1616 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1617 DUNIT_ODT_CONTROL_REG, 0xf, 0xf)); 1618 1619 return MV_OK; 1620 } 1621 1622 /* 1623 * Set Timing values for training 1624 */ 1625 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 1626 u32 if_id, enum hws_ddr_freq frequency) 1627 { 1628 u32 t_ckclk = 0, t_ras = 0; 1629 u32 t_rcd = 0, t_rp = 0, t_wr = 0, t_wtr = 0, t_rrd = 0, t_rtp = 0, 1630 t_rfc = 0, t_mod = 0; 1631 u32 val = 0, page_size = 0; 1632 enum hws_speed_bin speed_bin_index; 1633 enum hws_mem_size memory_size = MEM_2G; 1634 struct hws_topology_map *tm = ddr3_get_topology_map(); 1635 1636 speed_bin_index = tm->interface_params[if_id].speed_bin_index; 1637 memory_size = tm->interface_params[if_id].memory_size; 1638 page_size = 1639 (tm->interface_params[if_id].bus_width == 1640 BUS_WIDTH_8) ? page_param[memory_size]. 1641 page_size_8bit : page_param[memory_size].page_size_16bit; 1642 t_ckclk = (MEGA / freq_val[frequency]); 1643 t_rrd = (page_size == 1) ? speed_bin_table(speed_bin_index, 1644 SPEED_BIN_TRRD1K) : 1645 speed_bin_table(speed_bin_index, SPEED_BIN_TRRD2K); 1646 t_rrd = GET_MAX_VALUE(t_ckclk * 4, t_rrd); 1647 t_rtp = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index, 1648 SPEED_BIN_TRTP)); 1649 t_wtr = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index, 1650 SPEED_BIN_TWTR)); 1651 t_ras = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1652 SPEED_BIN_TRAS), 1653 t_ckclk); 1654 t_rcd = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1655 SPEED_BIN_TRCD), 1656 t_ckclk); 1657 t_rp = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1658 SPEED_BIN_TRP), 1659 t_ckclk); 1660 t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1661 SPEED_BIN_TWR), 1662 t_ckclk); 1663 t_wtr = TIME_2_CLOCK_CYCLES(t_wtr, t_ckclk); 1664 t_rrd = TIME_2_CLOCK_CYCLES(t_rrd, t_ckclk); 1665 t_rtp = TIME_2_CLOCK_CYCLES(t_rtp, t_ckclk); 1666 t_rfc = TIME_2_CLOCK_CYCLES(rfc_table[memory_size] * 1000, t_ckclk); 1667 t_mod = GET_MAX_VALUE(t_ckclk * 24, 15000); 1668 t_mod = TIME_2_CLOCK_CYCLES(t_mod, t_ckclk); 1669 1670 /* SDRAM Timing Low */ 1671 val = (t_ras & 0xf) | (t_rcd << 4) | (t_rp << 8) | (t_wr << 12) | 1672 (t_wtr << 16) | (((t_ras & 0x30) >> 4) << 20) | (t_rrd << 24) | 1673 (t_rtp << 28); 1674 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1675 SDRAM_TIMING_LOW_REG, val, 0xff3fffff)); 1676 1677 /* SDRAM Timing High */ 1678 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1679 SDRAM_TIMING_HIGH_REG, 1680 t_rfc & 0x7f, 0x7f)); 1681 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1682 SDRAM_TIMING_HIGH_REG, 1683 0x180, 0x180)); 1684 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1685 SDRAM_TIMING_HIGH_REG, 1686 0x600, 0x600)); 1687 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1688 SDRAM_TIMING_HIGH_REG, 1689 0x1800, 0xf800)); 1690 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1691 SDRAM_TIMING_HIGH_REG, 1692 ((t_rfc & 0x380) >> 7) << 16, 0x70000)); 1693 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1694 SDRAM_TIMING_HIGH_REG, 0, 1695 0x380000)); 1696 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1697 SDRAM_TIMING_HIGH_REG, 1698 (t_mod & 0xf) << 25, 0x1e00000)); 1699 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1700 SDRAM_TIMING_HIGH_REG, 1701 (t_mod >> 4) << 30, 0xc0000000)); 1702 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1703 SDRAM_TIMING_HIGH_REG, 1704 0x16000000, 0x1e000000)); 1705 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1706 SDRAM_TIMING_HIGH_REG, 1707 0x40000000, 0xc0000000)); 1708 1709 return MV_OK; 1710 } 1711 1712 /* 1713 * Mode Read 1714 */ 1715 int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info) 1716 { 1717 u32 ret; 1718 1719 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1720 MR0_REG, mode_info->reg_mr0, MASK_ALL_BITS); 1721 if (ret != MV_OK) 1722 return ret; 1723 1724 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1725 MR1_REG, mode_info->reg_mr1, MASK_ALL_BITS); 1726 if (ret != MV_OK) 1727 return ret; 1728 1729 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1730 MR2_REG, mode_info->reg_mr2, MASK_ALL_BITS); 1731 if (ret != MV_OK) 1732 return ret; 1733 1734 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1735 MR3_REG, mode_info->reg_mr2, MASK_ALL_BITS); 1736 if (ret != MV_OK) 1737 return ret; 1738 1739 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1740 READ_DATA_SAMPLE_DELAY, mode_info->read_data_sample, 1741 MASK_ALL_BITS); 1742 if (ret != MV_OK) 1743 return ret; 1744 1745 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1746 READ_DATA_READY_DELAY, mode_info->read_data_ready, 1747 MASK_ALL_BITS); 1748 if (ret != MV_OK) 1749 return ret; 1750 1751 return MV_OK; 1752 } 1753 1754 /* 1755 * Get first active IF 1756 */ 1757 int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask, 1758 u32 *interface_id) 1759 { 1760 u32 if_id; 1761 struct hws_topology_map *tm = ddr3_get_topology_map(); 1762 1763 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1764 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1765 if (interface_mask & (1 << if_id)) { 1766 *interface_id = if_id; 1767 break; 1768 } 1769 } 1770 1771 return MV_OK; 1772 } 1773 1774 /* 1775 * Write CS Result 1776 */ 1777 int ddr3_tip_write_cs_result(u32 dev_num, u32 offset) 1778 { 1779 u32 if_id, bus_num, cs_bitmask, data_val, cs_num; 1780 struct hws_topology_map *tm = ddr3_get_topology_map(); 1781 1782 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1783 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1784 for (bus_num = 0; bus_num < tm->num_of_bus_per_interface; 1785 bus_num++) { 1786 VALIDATE_ACTIVE(tm->bus_act_mask, bus_num); 1787 cs_bitmask = 1788 tm->interface_params[if_id]. 1789 as_bus_params[bus_num].cs_bitmask; 1790 if (cs_bitmask != effective_cs) { 1791 cs_num = GET_CS_FROM_MASK(cs_bitmask); 1792 ddr3_tip_bus_read(dev_num, if_id, 1793 ACCESS_TYPE_UNICAST, bus_num, 1794 DDR_PHY_DATA, 1795 offset + 1796 CS_REG_VALUE(effective_cs), 1797 &data_val); 1798 ddr3_tip_bus_write(dev_num, 1799 ACCESS_TYPE_UNICAST, 1800 if_id, 1801 ACCESS_TYPE_UNICAST, 1802 bus_num, DDR_PHY_DATA, 1803 offset + 1804 CS_REG_VALUE(cs_num), 1805 data_val); 1806 } 1807 } 1808 } 1809 1810 return MV_OK; 1811 } 1812 1813 /* 1814 * Write MRS 1815 */ 1816 int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd, 1817 u32 data, u32 mask) 1818 { 1819 u32 if_id, reg; 1820 struct hws_topology_map *tm = ddr3_get_topology_map(); 1821 1822 reg = (cmd == MRS1_CMD) ? MR1_REG : MR2_REG; 1823 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1824 PARAM_NOT_CARE, reg, data, mask)); 1825 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1826 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1827 CHECK_STATUS(ddr3_tip_if_write 1828 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1829 SDRAM_OPERATION_REG, 1830 (cs_mask_arr[if_id] << 8) | cmd, 0xf1f)); 1831 } 1832 1833 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1834 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1835 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 1836 0x1f, SDRAM_OPERATION_REG, 1837 MAX_POLLING_ITERATIONS) != MV_OK) { 1838 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1839 ("write_mrs_cmd: Poll cmd fail")); 1840 } 1841 } 1842 1843 return MV_OK; 1844 } 1845 1846 /* 1847 * Reset XSB Read FIFO 1848 */ 1849 int ddr3_tip_reset_fifo_ptr(u32 dev_num) 1850 { 1851 u32 if_id = 0; 1852 1853 /* Configure PHY reset value to 0 in order to "clean" the FIFO */ 1854 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1855 if_id, 0x15c8, 0, 0xff000000)); 1856 /* 1857 * Move PHY to RL mode (only in RL mode the PHY overrides FIFO values 1858 * during FIFO reset) 1859 */ 1860 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1861 if_id, TRAINING_SW_2_REG, 1862 0x1, 0x9)); 1863 /* In order that above configuration will influence the PHY */ 1864 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1865 if_id, 0x15b0, 1866 0x80000000, 0x80000000)); 1867 /* Reset read fifo assertion */ 1868 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1869 if_id, 0x1400, 0, 0x40000000)); 1870 /* Reset read fifo deassertion */ 1871 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1872 if_id, 0x1400, 1873 0x40000000, 0x40000000)); 1874 /* Move PHY back to functional mode */ 1875 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1876 if_id, TRAINING_SW_2_REG, 1877 0x8, 0x9)); 1878 /* Stop training machine */ 1879 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1880 if_id, 0x15b4, 0x10000, 0x10000)); 1881 1882 return MV_OK; 1883 } 1884 1885 /* 1886 * Reset Phy registers 1887 */ 1888 int ddr3_tip_ddr3_reset_phy_regs(u32 dev_num) 1889 { 1890 u32 if_id, phy_id, cs; 1891 struct hws_topology_map *tm = ddr3_get_topology_map(); 1892 1893 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1894 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1895 for (phy_id = 0; phy_id < tm->num_of_bus_per_interface; 1896 phy_id++) { 1897 VALIDATE_ACTIVE(tm->bus_act_mask, phy_id); 1898 CHECK_STATUS(ddr3_tip_bus_write 1899 (dev_num, ACCESS_TYPE_UNICAST, 1900 if_id, ACCESS_TYPE_UNICAST, 1901 phy_id, DDR_PHY_DATA, 1902 WL_PHY_REG + 1903 CS_REG_VALUE(effective_cs), 1904 phy_reg0_val)); 1905 CHECK_STATUS(ddr3_tip_bus_write 1906 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1907 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1908 RL_PHY_REG + CS_REG_VALUE(effective_cs), 1909 phy_reg2_val)); 1910 CHECK_STATUS(ddr3_tip_bus_write 1911 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1912 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1913 READ_CENTRALIZATION_PHY_REG + 1914 CS_REG_VALUE(effective_cs), phy_reg3_val)); 1915 CHECK_STATUS(ddr3_tip_bus_write 1916 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1917 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1918 WRITE_CENTRALIZATION_PHY_REG + 1919 CS_REG_VALUE(effective_cs), phy_reg3_val)); 1920 } 1921 } 1922 1923 /* Set Receiver Calibration value */ 1924 for (cs = 0; cs < MAX_CS_NUM; cs++) { 1925 /* PHY register 0xdb bits[5:0] - configure to 63 */ 1926 CHECK_STATUS(ddr3_tip_bus_write 1927 (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1928 ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1929 DDR_PHY_DATA, CSN_IOB_VREF_REG(cs), 63)); 1930 } 1931 1932 return MV_OK; 1933 } 1934 1935 /* 1936 * Restore Dunit registers 1937 */ 1938 int ddr3_tip_restore_dunit_regs(u32 dev_num) 1939 { 1940 u32 index_cnt; 1941 1942 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1943 PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG, 1944 0x1, 0x1)); 1945 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1946 PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG, 1947 calibration_update_control << 3, 1948 0x3 << 3)); 1949 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1950 PARAM_NOT_CARE, 1951 ODPG_WRITE_READ_MODE_ENABLE_REG, 1952 0xffff, MASK_ALL_BITS)); 1953 1954 for (index_cnt = 0; index_cnt < ARRAY_SIZE(odpg_default_value); 1955 index_cnt++) { 1956 CHECK_STATUS(ddr3_tip_if_write 1957 (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1958 odpg_default_value[index_cnt].reg_addr, 1959 odpg_default_value[index_cnt].reg_data, 1960 odpg_default_value[index_cnt].reg_mask)); 1961 } 1962 1963 return MV_OK; 1964 } 1965 1966 /* 1967 * Auto tune main flow 1968 */ 1969 static int ddr3_tip_ddr3_training_main_flow(u32 dev_num) 1970 { 1971 enum hws_ddr_freq freq = init_freq; 1972 struct init_cntr_param init_cntr_prm; 1973 int ret = MV_OK; 1974 u32 if_id; 1975 u32 max_cs = hws_ddr3_tip_max_cs_get(); 1976 struct hws_topology_map *tm = ddr3_get_topology_map(); 1977 1978 #ifndef EXCLUDE_SWITCH_DEBUG 1979 if (debug_training == DEBUG_LEVEL_TRACE) { 1980 CHECK_STATUS(print_device_info((u8)dev_num)); 1981 } 1982 #endif 1983 1984 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 1985 CHECK_STATUS(ddr3_tip_ddr3_reset_phy_regs(dev_num)); 1986 } 1987 /* Set to 0 after each loop to avoid illegal value may be used */ 1988 effective_cs = 0; 1989 1990 freq = init_freq; 1991 if (is_pll_before_init != 0) { 1992 for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { 1993 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1994 config_func_info[dev_num].tip_set_freq_divider_func( 1995 (u8)dev_num, if_id, freq); 1996 } 1997 } 1998 1999 if (is_adll_calib_before_init != 0) { 2000 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2001 ("with adll calib before init\n")); 2002 adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq); 2003 } 2004 2005 if (is_reg_dump != 0) { 2006 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2007 ("Dump before init controller\n")); 2008 ddr3_tip_reg_dump(dev_num); 2009 } 2010 2011 if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) { 2012 training_stage = INIT_CONTROLLER; 2013 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2014 ("INIT_CONTROLLER_MASK_BIT\n")); 2015 init_cntr_prm.do_mrs_phy = 1; 2016 init_cntr_prm.is_ctrl64_bit = 0; 2017 init_cntr_prm.init_phy = 1; 2018 init_cntr_prm.msys_init = 0; 2019 ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm); 2020 if (is_reg_dump != 0) 2021 ddr3_tip_reg_dump(dev_num); 2022 if (ret != MV_OK) { 2023 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2024 ("hws_ddr3_tip_init_controller failure\n")); 2025 if (debug_mode == 0) 2026 return MV_FAIL; 2027 } 2028 } 2029 2030 #ifdef STATIC_ALGO_SUPPORT 2031 if (mask_tune_func & STATIC_LEVELING_MASK_BIT) { 2032 training_stage = STATIC_LEVELING; 2033 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2034 ("STATIC_LEVELING_MASK_BIT\n")); 2035 ret = ddr3_tip_run_static_alg(dev_num, freq); 2036 if (is_reg_dump != 0) 2037 ddr3_tip_reg_dump(dev_num); 2038 if (ret != MV_OK) { 2039 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2040 ("ddr3_tip_run_static_alg failure\n")); 2041 if (debug_mode == 0) 2042 return MV_FAIL; 2043 } 2044 } 2045 #endif 2046 2047 if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) { 2048 training_stage = SET_LOW_FREQ; 2049 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2050 ("SET_LOW_FREQ_MASK_BIT %d\n", 2051 freq_val[low_freq])); 2052 ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2053 PARAM_NOT_CARE, low_freq); 2054 if (is_reg_dump != 0) 2055 ddr3_tip_reg_dump(dev_num); 2056 if (ret != MV_OK) { 2057 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2058 ("ddr3_tip_freq_set failure\n")); 2059 if (debug_mode == 0) 2060 return MV_FAIL; 2061 } 2062 } 2063 2064 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2065 if (mask_tune_func & LOAD_PATTERN_MASK_BIT) { 2066 training_stage = LOAD_PATTERN; 2067 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2068 ("LOAD_PATTERN_MASK_BIT #%d\n", 2069 effective_cs)); 2070 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2071 if (is_reg_dump != 0) 2072 ddr3_tip_reg_dump(dev_num); 2073 if (ret != MV_OK) { 2074 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2075 ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", 2076 effective_cs)); 2077 if (debug_mode == 0) 2078 return MV_FAIL; 2079 } 2080 } 2081 } 2082 /* Set to 0 after each loop to avoid illegal value may be used */ 2083 effective_cs = 0; 2084 2085 if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) { 2086 training_stage = SET_MEDIUM_FREQ; 2087 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2088 ("SET_MEDIUM_FREQ_MASK_BIT %d\n", 2089 freq_val[medium_freq])); 2090 ret = 2091 ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2092 PARAM_NOT_CARE, medium_freq); 2093 if (is_reg_dump != 0) 2094 ddr3_tip_reg_dump(dev_num); 2095 if (ret != MV_OK) { 2096 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2097 ("ddr3_tip_freq_set failure\n")); 2098 if (debug_mode == 0) 2099 return MV_FAIL; 2100 } 2101 } 2102 2103 if (mask_tune_func & WRITE_LEVELING_MASK_BIT) { 2104 training_stage = WRITE_LEVELING; 2105 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2106 ("WRITE_LEVELING_MASK_BIT\n")); 2107 if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) { 2108 ret = ddr3_tip_dynamic_write_leveling(dev_num); 2109 } else { 2110 /* Use old WL */ 2111 ret = ddr3_tip_legacy_dynamic_write_leveling(dev_num); 2112 } 2113 2114 if (is_reg_dump != 0) 2115 ddr3_tip_reg_dump(dev_num); 2116 if (ret != MV_OK) { 2117 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2118 ("ddr3_tip_dynamic_write_leveling failure\n")); 2119 if (debug_mode == 0) 2120 return MV_FAIL; 2121 } 2122 } 2123 2124 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2125 if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) { 2126 training_stage = LOAD_PATTERN_2; 2127 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2128 ("LOAD_PATTERN_2_MASK_BIT CS #%d\n", 2129 effective_cs)); 2130 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2131 if (is_reg_dump != 0) 2132 ddr3_tip_reg_dump(dev_num); 2133 if (ret != MV_OK) { 2134 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2135 ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", 2136 effective_cs)); 2137 if (debug_mode == 0) 2138 return MV_FAIL; 2139 } 2140 } 2141 } 2142 /* Set to 0 after each loop to avoid illegal value may be used */ 2143 effective_cs = 0; 2144 2145 if (mask_tune_func & READ_LEVELING_MASK_BIT) { 2146 training_stage = READ_LEVELING; 2147 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2148 ("READ_LEVELING_MASK_BIT\n")); 2149 if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) { 2150 ret = ddr3_tip_dynamic_read_leveling(dev_num, medium_freq); 2151 } else { 2152 /* Use old RL */ 2153 ret = ddr3_tip_legacy_dynamic_read_leveling(dev_num); 2154 } 2155 2156 if (is_reg_dump != 0) 2157 ddr3_tip_reg_dump(dev_num); 2158 if (ret != MV_OK) { 2159 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2160 ("ddr3_tip_dynamic_read_leveling failure\n")); 2161 if (debug_mode == 0) 2162 return MV_FAIL; 2163 } 2164 } 2165 2166 if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) { 2167 training_stage = WRITE_LEVELING_SUPP; 2168 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2169 ("WRITE_LEVELING_SUPP_MASK_BIT\n")); 2170 ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); 2171 if (is_reg_dump != 0) 2172 ddr3_tip_reg_dump(dev_num); 2173 if (ret != MV_OK) { 2174 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2175 ("ddr3_tip_dynamic_write_leveling_supp failure\n")); 2176 if (debug_mode == 0) 2177 return MV_FAIL; 2178 } 2179 } 2180 2181 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2182 if (mask_tune_func & PBS_RX_MASK_BIT) { 2183 training_stage = PBS_RX; 2184 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2185 ("PBS_RX_MASK_BIT CS #%d\n", 2186 effective_cs)); 2187 ret = ddr3_tip_pbs_rx(dev_num); 2188 if (is_reg_dump != 0) 2189 ddr3_tip_reg_dump(dev_num); 2190 if (ret != MV_OK) { 2191 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2192 ("ddr3_tip_pbs_rx failure CS #%d\n", 2193 effective_cs)); 2194 if (debug_mode == 0) 2195 return MV_FAIL; 2196 } 2197 } 2198 } 2199 2200 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2201 if (mask_tune_func & PBS_TX_MASK_BIT) { 2202 training_stage = PBS_TX; 2203 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2204 ("PBS_TX_MASK_BIT CS #%d\n", 2205 effective_cs)); 2206 ret = ddr3_tip_pbs_tx(dev_num); 2207 if (is_reg_dump != 0) 2208 ddr3_tip_reg_dump(dev_num); 2209 if (ret != MV_OK) { 2210 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2211 ("ddr3_tip_pbs_tx failure CS #%d\n", 2212 effective_cs)); 2213 if (debug_mode == 0) 2214 return MV_FAIL; 2215 } 2216 } 2217 } 2218 /* Set to 0 after each loop to avoid illegal value may be used */ 2219 effective_cs = 0; 2220 2221 if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) { 2222 training_stage = SET_TARGET_FREQ; 2223 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2224 ("SET_TARGET_FREQ_MASK_BIT %d\n", 2225 freq_val[tm-> 2226 interface_params[first_active_if]. 2227 memory_freq])); 2228 ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2229 PARAM_NOT_CARE, 2230 tm->interface_params[first_active_if]. 2231 memory_freq); 2232 if (is_reg_dump != 0) 2233 ddr3_tip_reg_dump(dev_num); 2234 if (ret != MV_OK) { 2235 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2236 ("ddr3_tip_freq_set failure\n")); 2237 if (debug_mode == 0) 2238 return MV_FAIL; 2239 } 2240 } 2241 2242 if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) { 2243 training_stage = WRITE_LEVELING_TF; 2244 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2245 ("WRITE_LEVELING_TF_MASK_BIT\n")); 2246 ret = ddr3_tip_dynamic_write_leveling(dev_num); 2247 if (is_reg_dump != 0) 2248 ddr3_tip_reg_dump(dev_num); 2249 if (ret != MV_OK) { 2250 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2251 ("ddr3_tip_dynamic_write_leveling TF failure\n")); 2252 if (debug_mode == 0) 2253 return MV_FAIL; 2254 } 2255 } 2256 2257 if (mask_tune_func & LOAD_PATTERN_HIGH_MASK_BIT) { 2258 training_stage = LOAD_PATTERN_HIGH; 2259 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("LOAD_PATTERN_HIGH\n")); 2260 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2261 if (is_reg_dump != 0) 2262 ddr3_tip_reg_dump(dev_num); 2263 if (ret != MV_OK) { 2264 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2265 ("ddr3_tip_load_all_pattern_to_mem failure\n")); 2266 if (debug_mode == 0) 2267 return MV_FAIL; 2268 } 2269 } 2270 2271 if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) { 2272 training_stage = READ_LEVELING_TF; 2273 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2274 ("READ_LEVELING_TF_MASK_BIT\n")); 2275 ret = ddr3_tip_dynamic_read_leveling(dev_num, tm-> 2276 interface_params[first_active_if]. 2277 memory_freq); 2278 if (is_reg_dump != 0) 2279 ddr3_tip_reg_dump(dev_num); 2280 if (ret != MV_OK) { 2281 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2282 ("ddr3_tip_dynamic_read_leveling TF failure\n")); 2283 if (debug_mode == 0) 2284 return MV_FAIL; 2285 } 2286 } 2287 2288 if (mask_tune_func & DM_PBS_TX_MASK_BIT) { 2289 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n")); 2290 } 2291 2292 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2293 if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) { 2294 training_stage = VREF_CALIBRATION; 2295 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("VREF\n")); 2296 ret = ddr3_tip_vref(dev_num); 2297 if (is_reg_dump != 0) { 2298 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2299 ("VREF Dump\n")); 2300 ddr3_tip_reg_dump(dev_num); 2301 } 2302 if (ret != MV_OK) { 2303 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2304 ("ddr3_tip_vref failure\n")); 2305 if (debug_mode == 0) 2306 return MV_FAIL; 2307 } 2308 } 2309 } 2310 /* Set to 0 after each loop to avoid illegal value may be used */ 2311 effective_cs = 0; 2312 2313 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2314 if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) { 2315 training_stage = CENTRALIZATION_RX; 2316 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2317 ("CENTRALIZATION_RX_MASK_BIT CS #%d\n", 2318 effective_cs)); 2319 ret = ddr3_tip_centralization_rx(dev_num); 2320 if (is_reg_dump != 0) 2321 ddr3_tip_reg_dump(dev_num); 2322 if (ret != MV_OK) { 2323 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2324 ("ddr3_tip_centralization_rx failure CS #%d\n", 2325 effective_cs)); 2326 if (debug_mode == 0) 2327 return MV_FAIL; 2328 } 2329 } 2330 } 2331 /* Set to 0 after each loop to avoid illegal value may be used */ 2332 effective_cs = 0; 2333 2334 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2335 if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) { 2336 training_stage = WRITE_LEVELING_SUPP_TF; 2337 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2338 ("WRITE_LEVELING_SUPP_TF_MASK_BIT CS #%d\n", 2339 effective_cs)); 2340 ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); 2341 if (is_reg_dump != 0) 2342 ddr3_tip_reg_dump(dev_num); 2343 if (ret != MV_OK) { 2344 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2345 ("ddr3_tip_dynamic_write_leveling_supp TF failure CS #%d\n", 2346 effective_cs)); 2347 if (debug_mode == 0) 2348 return MV_FAIL; 2349 } 2350 } 2351 } 2352 /* Set to 0 after each loop to avoid illegal value may be used */ 2353 effective_cs = 0; 2354 2355 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2356 if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) { 2357 training_stage = CENTRALIZATION_TX; 2358 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2359 ("CENTRALIZATION_TX_MASK_BIT CS #%d\n", 2360 effective_cs)); 2361 ret = ddr3_tip_centralization_tx(dev_num); 2362 if (is_reg_dump != 0) 2363 ddr3_tip_reg_dump(dev_num); 2364 if (ret != MV_OK) { 2365 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2366 ("ddr3_tip_centralization_tx failure CS #%d\n", 2367 effective_cs)); 2368 if (debug_mode == 0) 2369 return MV_FAIL; 2370 } 2371 } 2372 } 2373 /* Set to 0 after each loop to avoid illegal value may be used */ 2374 effective_cs = 0; 2375 2376 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n")); 2377 /* restore register values */ 2378 CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num)); 2379 2380 if (is_reg_dump != 0) 2381 ddr3_tip_reg_dump(dev_num); 2382 2383 return MV_OK; 2384 } 2385 2386 /* 2387 * DDR3 Dynamic training flow 2388 */ 2389 static int ddr3_tip_ddr3_auto_tune(u32 dev_num) 2390 { 2391 u32 if_id, stage, ret; 2392 int is_if_fail = 0, is_auto_tune_fail = 0; 2393 2394 training_stage = INIT_CONTROLLER; 2395 2396 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2397 for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) 2398 training_result[stage][if_id] = NO_TEST_DONE; 2399 } 2400 2401 ret = ddr3_tip_ddr3_training_main_flow(dev_num); 2402 2403 /* activate XSB test */ 2404 if (xsb_validate_type != 0) { 2405 run_xsb_test(dev_num, xsb_validation_base_address, 1, 1, 2406 0x1024); 2407 } 2408 2409 if (is_reg_dump != 0) 2410 ddr3_tip_reg_dump(dev_num); 2411 2412 /* print log */ 2413 CHECK_STATUS(ddr3_tip_print_log(dev_num, window_mem_addr)); 2414 2415 if (ret != MV_OK) { 2416 CHECK_STATUS(ddr3_tip_print_stability_log(dev_num)); 2417 } 2418 2419 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2420 is_if_fail = 0; 2421 for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) { 2422 if (training_result[stage][if_id] == TEST_FAILED) 2423 is_if_fail = 1; 2424 } 2425 if (is_if_fail == 1) { 2426 is_auto_tune_fail = 1; 2427 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2428 ("Auto Tune failed for IF %d\n", 2429 if_id)); 2430 } 2431 } 2432 2433 if ((ret == MV_FAIL) || (is_auto_tune_fail == 1)) 2434 return MV_FAIL; 2435 else 2436 return MV_OK; 2437 } 2438 2439 /* 2440 * Enable init sequence 2441 */ 2442 int ddr3_tip_enable_init_sequence(u32 dev_num) 2443 { 2444 int is_fail = 0; 2445 u32 if_id = 0, mem_mask = 0, bus_index = 0; 2446 struct hws_topology_map *tm = ddr3_get_topology_map(); 2447 2448 /* Enable init sequence */ 2449 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 0, 2450 SDRAM_INIT_CONTROL_REG, 0x1, 0x1)); 2451 2452 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2453 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 2454 2455 if (ddr3_tip_if_polling 2456 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1, 2457 SDRAM_INIT_CONTROL_REG, 2458 MAX_POLLING_ITERATIONS) != MV_OK) { 2459 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2460 ("polling failed IF %d\n", 2461 if_id)); 2462 is_fail = 1; 2463 continue; 2464 } 2465 2466 mem_mask = 0; 2467 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 2468 bus_index++) { 2469 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 2470 mem_mask |= 2471 tm->interface_params[if_id]. 2472 as_bus_params[bus_index].mirror_enable_bitmask; 2473 } 2474 2475 if (mem_mask != 0) { 2476 /* Disable Multi CS */ 2477 CHECK_STATUS(ddr3_tip_if_write 2478 (dev_num, ACCESS_TYPE_MULTICAST, 2479 if_id, CS_ENABLE_REG, 1 << 3, 2480 1 << 3)); 2481 } 2482 } 2483 2484 return (is_fail == 0) ? MV_OK : MV_FAIL; 2485 } 2486 2487 int ddr3_tip_register_dq_table(u32 dev_num, u32 *table) 2488 { 2489 dq_map_table = table; 2490 2491 return MV_OK; 2492 } 2493 2494 /* 2495 * Check if pup search is locked 2496 */ 2497 int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode) 2498 { 2499 u32 bit_start = 0, bit_end = 0, bit_id; 2500 2501 if (read_mode == RESULT_PER_BIT) { 2502 bit_start = 0; 2503 bit_end = BUS_WIDTH_IN_BITS - 1; 2504 } else { 2505 bit_start = 0; 2506 bit_end = 0; 2507 } 2508 2509 for (bit_id = bit_start; bit_id <= bit_end; bit_id++) { 2510 if (GET_LOCK_RESULT(pup_buf[bit_id]) == 0) 2511 return 0; 2512 } 2513 2514 return 1; 2515 } 2516 2517 /* 2518 * Get minimum buffer value 2519 */ 2520 u8 ddr3_tip_get_buf_min(u8 *buf_ptr) 2521 { 2522 u8 min_val = 0xff; 2523 u8 cnt = 0; 2524 2525 for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { 2526 if (buf_ptr[cnt] < min_val) 2527 min_val = buf_ptr[cnt]; 2528 } 2529 2530 return min_val; 2531 } 2532 2533 /* 2534 * Get maximum buffer value 2535 */ 2536 u8 ddr3_tip_get_buf_max(u8 *buf_ptr) 2537 { 2538 u8 max_val = 0; 2539 u8 cnt = 0; 2540 2541 for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { 2542 if (buf_ptr[cnt] > max_val) 2543 max_val = buf_ptr[cnt]; 2544 } 2545 2546 return max_val; 2547 } 2548 2549 /* 2550 * The following functions return memory parameters: 2551 * bus and device width, device size 2552 */ 2553 2554 u32 hws_ddr3_get_bus_width(void) 2555 { 2556 struct hws_topology_map *tm = ddr3_get_topology_map(); 2557 2558 return (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 2559 1) ? 16 : 32; 2560 } 2561 2562 u32 hws_ddr3_get_device_width(u32 if_id) 2563 { 2564 struct hws_topology_map *tm = ddr3_get_topology_map(); 2565 2566 return (tm->interface_params[if_id].bus_width == 2567 BUS_WIDTH_8) ? 8 : 16; 2568 } 2569 2570 u32 hws_ddr3_get_device_size(u32 if_id) 2571 { 2572 struct hws_topology_map *tm = ddr3_get_topology_map(); 2573 2574 if (tm->interface_params[if_id].memory_size >= 2575 MEM_SIZE_LAST) { 2576 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2577 ("Error: Wrong device size of Cs: %d", 2578 tm->interface_params[if_id].memory_size)); 2579 return 0; 2580 } else { 2581 return 1 << tm->interface_params[if_id].memory_size; 2582 } 2583 } 2584 2585 int hws_ddr3_calc_mem_cs_size(u32 if_id, u32 cs, u32 *cs_size) 2586 { 2587 u32 cs_mem_size, dev_size; 2588 2589 dev_size = hws_ddr3_get_device_size(if_id); 2590 if (dev_size != 0) { 2591 cs_mem_size = ((hws_ddr3_get_bus_width() / 2592 hws_ddr3_get_device_width(if_id)) * dev_size); 2593 2594 /* the calculated result in Gbytex16 to avoid float using */ 2595 2596 if (cs_mem_size == 2) { 2597 *cs_size = _128M; 2598 } else if (cs_mem_size == 4) { 2599 *cs_size = _256M; 2600 } else if (cs_mem_size == 8) { 2601 *cs_size = _512M; 2602 } else if (cs_mem_size == 16) { 2603 *cs_size = _1G; 2604 } else if (cs_mem_size == 32) { 2605 *cs_size = _2G; 2606 } else { 2607 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2608 ("Error: Wrong Memory size of Cs: %d", cs)); 2609 return MV_FAIL; 2610 } 2611 return MV_OK; 2612 } else { 2613 return MV_FAIL; 2614 } 2615 } 2616 2617 int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr) 2618 { 2619 u32 cs_mem_size = 0; 2620 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE 2621 u32 physical_mem_size; 2622 u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE; 2623 #endif 2624 2625 if (hws_ddr3_calc_mem_cs_size(if_id, cs, &cs_mem_size) != MV_OK) 2626 return MV_FAIL; 2627 2628 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE 2629 struct hws_topology_map *tm = ddr3_get_topology_map(); 2630 /* 2631 * if number of address pins doesn't allow to use max mem size that 2632 * is defined in topology mem size is defined by 2633 * DEVICE_MAX_DRAM_ADDRESS_SIZE 2634 */ 2635 physical_mem_size = 2636 mv_hwsmem_size[tm->interface_params[0].memory_size]; 2637 2638 if (hws_ddr3_get_device_width(cs) == 16) { 2639 /* 2640 * 16bit mem device can be twice more - no need in less 2641 * significant pin 2642 */ 2643 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2; 2644 } 2645 2646 if (physical_mem_size > max_mem_size) { 2647 cs_mem_size = max_mem_size * 2648 (hws_ddr3_get_bus_width() / 2649 hws_ddr3_get_device_width(if_id)); 2650 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2651 ("Updated Physical Mem size is from 0x%x to %x\n", 2652 physical_mem_size, 2653 DEVICE_MAX_DRAM_ADDRESS_SIZE)); 2654 } 2655 #endif 2656 2657 /* calculate CS base addr */ 2658 *cs_base_addr = ((cs_mem_size) * cs) & 0xffff0000; 2659 2660 return MV_OK; 2661 } 2662