// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

#define GET_MAX_VALUE(x, y) \
	((x) > (y)) ? (x) : (y)
#define CEIL_DIVIDE(x, y) \
	((x - (x / y) * y) == 0) ? ((x / y) - 1) : (x / y)

#define TIME_2_CLOCK_CYCLES	CEIL_DIVIDE

#define GET_CS_FROM_MASK(mask)	(cs_mask2_num[mask])
#define CS_CBE_VALUE(cs_num)	(cs_cbe_reg[cs_num])

#define TIMES_9_TREFI_CYCLES	0x8

u32 window_mem_addr = 0;
u32 phy_reg0_val = 0;
u32 phy_reg1_val = 8;
u32 phy_reg2_val = 0;
u32 phy_reg3_val = 0xa;
enum hws_ddr_freq init_freq = DDR_FREQ_667;
enum hws_ddr_freq low_freq = DDR_FREQ_LOW_FREQ;
enum hws_ddr_freq medium_freq;
u32 debug_dunit = 0;
u32 odt_additional = 1;
u32 *dq_map_table = NULL;
u32 odt_config = 1;

#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ALLEYCAT3) || \
	defined(CONFIG_ARMADA_39X)
u32 is_pll_before_init = 0, is_adll_calib_before_init = 0, is_dfs_in_init = 0;
u32 dfs_low_freq = 130;
#else
u32 is_pll_before_init = 0, is_adll_calib_before_init = 1, is_dfs_in_init = 0;
u32 dfs_low_freq = 100;
#endif
u32 g_rtt_nom_c_s0, g_rtt_nom_c_s1;
u8 calibration_update_control;	/* 2 - external only, 1 - internal only */

enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
enum auto_tune_stage training_stage = INIT_CONTROLLER;
u32 finger_test = 0, p_finger_start = 11, p_finger_end = 64,
	n_finger_start = 11, n_finger_end = 64,
	p_finger_step = 3, n_finger_step = 3;
u32 clamp_tbl[] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };

/* Initialized to 0xff; this variable is defined by the user in debug mode */
u32 mode2_t = 0xff;
u32 xsb_validate_type = 0;
u32 xsb_validation_base_address = 0xf000;
u32 first_active_if = 0;
u32 dfs_low_phy1 = 0x1f;
u32 multicast_id = 0;
int use_broadcast = 0;
struct hws_tip_freq_config_info *freq_info_table = NULL;
u8 is_cbe_required = 0;
u32 debug_mode = 0;
u32 delay_enable = 0;
int rl_mid_freq_wa = 0;

u32 effective_cs = 0;

u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT |
		      WRITE_LEVELING_MASK_BIT |
		      LOAD_PATTERN_2_MASK_BIT |
		      READ_LEVELING_MASK_BIT |
		      SET_TARGET_FREQ_MASK_BIT | WRITE_LEVELING_TF_MASK_BIT |
		      READ_LEVELING_TF_MASK_BIT |
		      CENTRALIZATION_RX_MASK_BIT | CENTRALIZATION_TX_MASK_BIT);

void ddr3_print_version(void)
{
	printf(DDR3_TIP_VERSION_STRING);
}

static int ddr3_tip_ddr3_training_main_flow(u32 dev_num);
static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type,
			      u32 if_id, u32 cl_value, u32 cwl_value);
static int ddr3_tip_ddr3_auto_tune(u32 dev_num);
static int is_bus_access_done(u32 dev_num, u32 if_id,
			      u32 dunit_reg_adrr, u32 bit);
#ifdef ODT_TEST_SUPPORT
static int odt_test(u32 dev_num, enum hws_algo_type algo_type);
#endif

int adll_calibration(u32 dev_num, enum hws_access_type access_type,
		     u32 if_id, enum hws_ddr_freq frequency);
static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
			       u32 if_id, enum hws_ddr_freq frequency);

static struct page_element page_param[] = {
	/*
	 * 8bits        16 bits
	 * page-size(K) page-size(K) mask
	 */
	{ 1, 2, 2},
	/* 512M */
	{ 1, 2, 3},
	/* 1G */
	{ 1, 2, 0},
	/* 2G */
	{ 1, 2, 4},
	/* 4G */
	{ 2, 2, 5}
	/* 8G */
};

static u8 mem_size_config[MEM_SIZE_LAST] = {
	0x2,			/* 512Mbit */
	0x3,			/* 1Gbit */
	0x0,			/* 2Gbit */
	0x4,			/* 4Gbit */
	0x5			/* 8Gbit */
};

static u8 cs_mask2_num[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };

static struct reg_data odpg_default_value[] = {
	{0x1034, 0x38000, MASK_ALL_BITS},
	{0x1038, 0x0, MASK_ALL_BITS},
	{0x10b0, 0x0, MASK_ALL_BITS},
	{0x10b8, 0x0, MASK_ALL_BITS},
	{0x10c0, 0x0, MASK_ALL_BITS},
	{0x10f0, 0x0, MASK_ALL_BITS},
	{0x10f4, 0x0, MASK_ALL_BITS},
	{0x10f8, 0xff, MASK_ALL_BITS},
	{0x10fc, 0xffff, MASK_ALL_BITS},
	{0x1130, 0x0, MASK_ALL_BITS},
	{0x1830, 0x2000000, MASK_ALL_BITS},
	{0x14d0, 0x0, MASK_ALL_BITS},
	{0x14d4, 0x0, MASK_ALL_BITS},
	{0x14d8, 0x0, MASK_ALL_BITS},
	{0x14dc, 0x0, MASK_ALL_BITS},
	{0x1454, 0x0, MASK_ALL_BITS},
	{0x1594, 0x0, MASK_ALL_BITS},
	{0x1598, 0x0, MASK_ALL_BITS},
	{0x159c, 0x0, MASK_ALL_BITS},
	{0x15a0, 0x0, MASK_ALL_BITS},
	{0x15a4, 0x0, MASK_ALL_BITS},
	{0x15a8, 0x0, MASK_ALL_BITS},
	{0x15ac, 0x0, MASK_ALL_BITS},
	{0x1604, 0x0, MASK_ALL_BITS},
	{0x1608, 0x0, MASK_ALL_BITS},
	{0x160c, 0x0, MASK_ALL_BITS},
	{0x1610, 0x0, MASK_ALL_BITS},
	{0x1614, 0x0, MASK_ALL_BITS},
	{0x1618, 0x0, MASK_ALL_BITS},
	{0x1624, 0x0, MASK_ALL_BITS},
	{0x1690, 0x0, MASK_ALL_BITS},
	{0x1694, 0x0, MASK_ALL_BITS},
	{0x1698, 0x0, MASK_ALL_BITS},
	{0x169c, 0x0, MASK_ALL_BITS},
	{0x14b8, 0x6f67, MASK_ALL_BITS},
	{0x1630, 0x0, MASK_ALL_BITS},
	{0x1634, 0x0, MASK_ALL_BITS},
	{0x1638, 0x0, MASK_ALL_BITS},
	{0x163c, 0x0, MASK_ALL_BITS},
	{0x16b0, 0x0, MASK_ALL_BITS},
	{0x16b4, 0x0, MASK_ALL_BITS},
	{0x16b8, 0x0, MASK_ALL_BITS},
	{0x16bc, 0x0, MASK_ALL_BITS},
	{0x16c0, 0x0, MASK_ALL_BITS},
	{0x16c4, 0x0, MASK_ALL_BITS},
	{0x16c8, 0x0, MASK_ALL_BITS},
	{0x16cc, 0x1, MASK_ALL_BITS},
	{0x16f0, 0x1, MASK_ALL_BITS},
	{0x16f4, 0x0, MASK_ALL_BITS},
	{0x16f8, 0x0, MASK_ALL_BITS},
	{0x16fc, 0x0, MASK_ALL_BITS}
};

static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access,
			       u32 if_id, enum hws_access_type phy_access,
			       u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr,
			       u32 data_value, enum hws_operation oper_type);
static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id);
static int ddr3_tip_rank_control(u32 dev_num, u32 if_id);

/*
 * Update global training parameters by data from user
 */
int ddr3_tip_tune_training_params(u32 dev_num,
				  struct tune_train_params *params)
{
	if (params->ck_delay != -1)
		ck_delay = params->ck_delay;
	if (params->ck_delay_16 != -1)
		ck_delay_16 = params->ck_delay_16;
	if (params->phy_reg3_val != -1)
		phy_reg3_val = params->phy_reg3_val;

	return MV_OK;
}

/*
 * Configure CS
 */
int ddr3_tip_configure_cs(u32 dev_num, u32 if_id, u32 cs_num, u32 enable)
{
	u32 data, addr_hi, data_high;
	u32 mem_index;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (enable == 1) {
		data = (tm->interface_params[if_id].bus_width ==
			BUS_WIDTH_8) ?
0 : 1; 217 CHECK_STATUS(ddr3_tip_if_write 218 (dev_num, ACCESS_TYPE_UNICAST, if_id, 219 SDRAM_ACCESS_CONTROL_REG, (data << (cs_num * 4)), 220 0x3 << (cs_num * 4))); 221 mem_index = tm->interface_params[if_id].memory_size; 222 223 addr_hi = mem_size_config[mem_index] & 0x3; 224 CHECK_STATUS(ddr3_tip_if_write 225 (dev_num, ACCESS_TYPE_UNICAST, if_id, 226 SDRAM_ACCESS_CONTROL_REG, 227 (addr_hi << (2 + cs_num * 4)), 228 0x3 << (2 + cs_num * 4))); 229 230 data_high = (mem_size_config[mem_index] & 0x4) >> 2; 231 CHECK_STATUS(ddr3_tip_if_write 232 (dev_num, ACCESS_TYPE_UNICAST, if_id, 233 SDRAM_ACCESS_CONTROL_REG, 234 data_high << (20 + cs_num), 1 << (20 + cs_num))); 235 236 /* Enable Address Select Mode */ 237 CHECK_STATUS(ddr3_tip_if_write 238 (dev_num, ACCESS_TYPE_UNICAST, if_id, 239 SDRAM_ACCESS_CONTROL_REG, 1 << (16 + cs_num), 240 1 << (16 + cs_num))); 241 } 242 switch (cs_num) { 243 case 0: 244 case 1: 245 case 2: 246 CHECK_STATUS(ddr3_tip_if_write 247 (dev_num, ACCESS_TYPE_UNICAST, if_id, 248 DDR_CONTROL_LOW_REG, (enable << (cs_num + 11)), 249 1 << (cs_num + 11))); 250 break; 251 case 3: 252 CHECK_STATUS(ddr3_tip_if_write 253 (dev_num, ACCESS_TYPE_UNICAST, if_id, 254 DDR_CONTROL_LOW_REG, (enable << 15), 1 << 15)); 255 break; 256 } 257 258 return MV_OK; 259 } 260 261 /* 262 * Calculate number of CS 263 */ 264 static int calc_cs_num(u32 dev_num, u32 if_id, u32 *cs_num) 265 { 266 u32 cs; 267 u32 bus_cnt; 268 u32 cs_count; 269 u32 cs_bitmask; 270 u32 curr_cs_num = 0; 271 struct hws_topology_map *tm = ddr3_get_topology_map(); 272 273 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 274 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 275 cs_count = 0; 276 cs_bitmask = tm->interface_params[if_id]. 277 as_bus_params[bus_cnt].cs_bitmask; 278 for (cs = 0; cs < MAX_CS_NUM; cs++) { 279 if ((cs_bitmask >> cs) & 1) 280 cs_count++; 281 } 282 283 if (curr_cs_num == 0) { 284 curr_cs_num = cs_count; 285 } else if (cs_count != curr_cs_num) { 286 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 287 ("CS number is different per bus (IF %d BUS %d cs_num %d curr_cs_num %d)\n", 288 if_id, bus_cnt, cs_count, 289 curr_cs_num)); 290 return MV_NOT_SUPPORTED; 291 } 292 } 293 *cs_num = curr_cs_num; 294 295 return MV_OK; 296 } 297 298 /* 299 * Init Controller Flow 300 */ 301 int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_prm) 302 { 303 u32 if_id; 304 u32 cs_num; 305 u32 t_refi = 0, t_hclk = 0, t_ckclk = 0, t_faw = 0, t_pd = 0, 306 t_wr = 0, t2t = 0, txpdll = 0; 307 u32 data_value = 0, bus_width = 0, page_size = 0, cs_cnt = 0, 308 mem_mask = 0, bus_index = 0; 309 enum hws_speed_bin speed_bin_index = SPEED_BIN_DDR_2133N; 310 enum hws_mem_size memory_size = MEM_2G; 311 enum hws_ddr_freq freq = init_freq; 312 enum hws_timing timing; 313 u32 cs_mask = 0; 314 u32 cl_value = 0, cwl_val = 0; 315 u32 refresh_interval_cnt = 0, bus_cnt = 0, adll_tap = 0; 316 enum hws_access_type access_type = ACCESS_TYPE_UNICAST; 317 u32 data_read[MAX_INTERFACE_NUM]; 318 struct hws_topology_map *tm = ddr3_get_topology_map(); 319 u32 odt_config = g_odt_config_2cs; 320 321 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 322 ("Init_controller, do_mrs_phy=%d, is_ctrl64_bit=%d\n", 323 init_cntr_prm->do_mrs_phy, 324 init_cntr_prm->is_ctrl64_bit)); 325 326 if (init_cntr_prm->init_phy == 1) { 327 CHECK_STATUS(ddr3_tip_configure_phy(dev_num)); 328 } 329 330 if (generic_init_controller == 1) { 331 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 332 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 333 
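			/*
			 * Illustrative note (added, not from the original sources):
			 * the per-interface setup below derives the refresh interval
			 * counter from the DRAM refresh period and the internal
			 * (HCLK) clock. As a rough worked example, assuming a
			 * TREFI_LOW of 7800 ns (the standard DDR3 tREFI) and a
			 * 667 MHz interface clock:
			 *
			 *   t_ckclk = MEGA / 667                   ~= 1499 ps
			 *   t_hclk  = 2 * t_ckclk                  ~= 2998 ps (2:1)
			 *   t_refi  = 7800 * 1000                  =  7800000 ps
			 *   refresh_interval_cnt = t_refi / t_hclk ~= 2601
			 *
			 * For interfaces flagged HWS_TEMP_HIGH the shorter
			 * TREFI_HIGH period is used instead. The exact figures
			 * depend on freq_val[] and the TREFI_LOW / TREFI_HIGH
			 * definitions in the surrounding headers.
			 */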
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("active IF %d\n", if_id));
			mem_mask = 0;
			for (bus_index = 0;
			     bus_index < GET_TOPOLOGY_NUM_OF_BUSES();
			     bus_index++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
				mem_mask |=
					tm->interface_params[if_id].
					as_bus_params[bus_index].mirror_enable_bitmask;
			}

			if (mem_mask != 0) {
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, ACCESS_TYPE_MULTICAST,
					      if_id, CS_ENABLE_REG, 0,
					      0x8));
			}

			memory_size =
				tm->interface_params[if_id].
				memory_size;
			speed_bin_index =
				tm->interface_params[if_id].
				speed_bin_index;
			freq = init_freq;
			t_refi =
				(tm->interface_params[if_id].
				 interface_temp ==
				 HWS_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW;
			t_refi *= 1000;	/* psec */
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("mem_size %d speed_bin_ind %d freq %d t_refi %d\n",
					   memory_size, speed_bin_index, freq,
					   t_refi));
			/* HCLK & CK CLK in 2:1 [ps] */
			/* t_ckclk is external clock */
			t_ckclk = (MEGA / freq_val[freq]);
			/* t_hclk is internal clock */
			t_hclk = 2 * t_ckclk;
			refresh_interval_cnt = t_refi / t_hclk;	/* no units */
			bus_width =
				(DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)
				 == 1) ? (16) : (32);

			if (init_cntr_prm->is_ctrl64_bit)
				bus_width = 64;

			data_value =
				(refresh_interval_cnt | 0x4000 |
				 ((bus_width ==
				   32) ? 0x8000 : 0) | 0x1000000) & ~(1 << 26);

			/* Interface Bus Width */
			/* SRMode */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      SDRAM_CONFIGURATION_REG, data_value,
				      0x100ffff));

			/* Interleave first command pre-charge enable (TBD) */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      SDRAM_OPEN_PAGE_CONTROL_REG, (1 << 10),
				      (1 << 10)));

			/* PHY configuration */
			/*
			 * Postamble Length = 1.5cc, Address/ctrl to clk skew
			 * 1/2 cycle, Preamble length normal, parallel ADLL enable
			 */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DRAM_PHY_CONFIGURATION, 0x28, 0x3e));
			if (init_cntr_prm->is_ctrl64_bit) {
				/* positive edge */
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      DRAM_PHY_CONFIGURATION, 0x0,
					      0xff80));
			}

			/* calibration block disable */
			/* Xbar Read buffer select (for Internal access) */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      CALIB_MACHINE_CTRL_REG, 0x1200c,
				      0x7dffe01c));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      CALIB_MACHINE_CTRL_REG,
				      calibration_update_control << 3, 0x3 << 3));

			/* Pad calibration control - enable */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      CALIB_MACHINE_CTRL_REG, 0x1, 0x1));

			cs_mask = 0;
			data_value = 0x7;
			/*
			 * Address ctrl - Part of the Generic code
			 * The next configuration is done:
			 * 1) Memory Size
			 * 2) Bus_width
			 * 3) CS#
			 * 4) Page Number
			 * 5) t_faw
			 * Per Dunit get from the Map_topology the parameters:
			 * Bus_width
			 * t_faw is per Dunit, not per CS
			 */
			page_size =
				(tm->interface_params[if_id].
				 bus_width ==
				 BUS_WIDTH_8) ? page_param[memory_size].
				page_size_8bit : page_param[memory_size].
				page_size_16bit;

			t_faw =
				(page_size == 1) ?
				speed_bin_table(speed_bin_index,
						SPEED_BIN_TFAW1K)
				: speed_bin_table(speed_bin_index,
						  SPEED_BIN_TFAW2K);

			data_value = TIME_2_CLOCK_CYCLES(t_faw, t_ckclk);
			data_value = data_value << 24;
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      SDRAM_ACCESS_CONTROL_REG, data_value,
				      0x7f000000));

			data_value =
				(tm->interface_params[if_id].
				 bus_width == BUS_WIDTH_8) ? 0 : 1;

			/* create merge cs mask for all cs available in dunit */
			for (bus_cnt = 0;
			     bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES();
			     bus_cnt++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
				cs_mask |=
					tm->interface_params[if_id].
					as_bus_params[bus_cnt].cs_bitmask;
			}
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("Init_controller IF %d cs_mask %d\n",
					   if_id, cs_mask));
			/*
			 * Configure the following based on the Map Topology:
			 * if the Dunit has only CS0, configure CS0; if it is
			 * multi-CS, configure both. The bus_width is the
			 * memory bus width - x8 or x16.
			 */
			for (cs_cnt = 0; cs_cnt < NUM_OF_CS; cs_cnt++) {
				ddr3_tip_configure_cs(dev_num, if_id, cs_cnt,
						      ((cs_mask & (1 << cs_cnt)) ? 1
						       : 0));
			}

			if (init_cntr_prm->do_mrs_phy) {
				/*
				 * MR0 - Part of the Generic code
				 * The next configuration is done:
				 * 1) Burst Length
				 * 2) CAS Latency
				 * For each dunit get its speed_bin and target
				 * frequency; from these two parameters get the
				 * appropriate cas_l from the CL table.
				 */
				cl_value =
					tm->interface_params[if_id].
					cas_l;
				cwl_val =
					tm->interface_params[if_id].
					cas_wl;
				DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
						  ("cl_value 0x%x cwl_val 0x%x\n",
						   cl_value, cwl_val));
				t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
									   SPEED_BIN_TWR),
							   t_ckclk);
				data_value =
					((cl_mask_table[cl_value] & 0x1) << 2) |
					((cl_mask_table[cl_value] & 0xe) << 3);
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      MR0_REG, data_value,
					      (0x7 << 4) | (1 << 2)));
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      MR0_REG, twr_mask_table[t_wr + 1] << 9,
					      (0x7 << 9)));

				/*
				 * MR1: Set RTT and DIC Design GL values
				 * configured by user
				 */
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, ACCESS_TYPE_MULTICAST,
					      PARAM_NOT_CARE, MR1_REG,
					      g_dic | g_rtt_nom, 0x266));

				/* MR2 - Part of the Generic code */
				/*
				 * The next configuration is done:
				 * 1) SRT
				 * 2) CAS Write Latency
				 */
				data_value = (cwl_mask_table[cwl_val] << 3);
				data_value |=
					((tm->interface_params[if_id].
					  interface_temp ==
					  HWS_TEMP_HIGH) ?
(1 << 7) : 0); 548 CHECK_STATUS(ddr3_tip_if_write 549 (dev_num, access_type, if_id, 550 MR2_REG, data_value, 551 (0x7 << 3) | (0x1 << 7) | (0x3 << 552 9))); 553 } 554 555 ddr3_tip_write_odt(dev_num, access_type, if_id, 556 cl_value, cwl_val); 557 ddr3_tip_set_timing(dev_num, access_type, if_id, freq); 558 559 CHECK_STATUS(ddr3_tip_if_write 560 (dev_num, access_type, if_id, 561 DUNIT_CONTROL_HIGH_REG, 0x177, 562 0x1000177)); 563 564 if (init_cntr_prm->is_ctrl64_bit) { 565 /* disable 0.25 cc delay */ 566 CHECK_STATUS(ddr3_tip_if_write 567 (dev_num, access_type, if_id, 568 DUNIT_CONTROL_HIGH_REG, 0x0, 569 0x800)); 570 } 571 572 /* reset bit 7 */ 573 CHECK_STATUS(ddr3_tip_if_write 574 (dev_num, access_type, if_id, 575 DUNIT_CONTROL_HIGH_REG, 576 (init_cntr_prm->msys_init << 7), (1 << 7))); 577 578 /* calculate number of CS (per interface) */ 579 CHECK_STATUS(calc_cs_num 580 (dev_num, if_id, &cs_num)); 581 timing = tm->interface_params[if_id].timing; 582 583 if (mode2_t != 0xff) { 584 t2t = mode2_t; 585 } else if (timing != HWS_TIM_DEFAULT) { 586 /* Board topology map is forcing timing */ 587 t2t = (timing == HWS_TIM_2T) ? 1 : 0; 588 } else { 589 t2t = (cs_num == 1) ? 0 : 1; 590 } 591 592 CHECK_STATUS(ddr3_tip_if_write 593 (dev_num, access_type, if_id, 594 DDR_CONTROL_LOW_REG, t2t << 3, 595 0x3 << 3)); 596 /* move the block to ddr3_tip_set_timing - start */ 597 t_pd = TIMES_9_TREFI_CYCLES; 598 txpdll = GET_MAX_VALUE(t_ckclk * 10, 599 speed_bin_table(speed_bin_index, 600 SPEED_BIN_TXPDLL)); 601 txpdll = CEIL_DIVIDE((txpdll - 1), t_ckclk); 602 CHECK_STATUS(ddr3_tip_if_write 603 (dev_num, access_type, if_id, 604 DDR_TIMING_REG, txpdll << 4 | t_pd, 605 0x1f << 4 | 0xf)); 606 CHECK_STATUS(ddr3_tip_if_write 607 (dev_num, access_type, if_id, 608 DDR_TIMING_REG, 0x28 << 9, 0x3f << 9)); 609 CHECK_STATUS(ddr3_tip_if_write 610 (dev_num, access_type, if_id, 611 DDR_TIMING_REG, 0xa << 21, 0xff << 21)); 612 613 /* move the block to ddr3_tip_set_timing - end */ 614 /* AUTO_ZQC_TIMING */ 615 CHECK_STATUS(ddr3_tip_if_write 616 (dev_num, access_type, if_id, 617 TIMING_REG, (AUTO_ZQC_TIMING | (2 << 20)), 618 0x3fffff)); 619 CHECK_STATUS(ddr3_tip_if_read 620 (dev_num, access_type, if_id, 621 DRAM_PHY_CONFIGURATION, data_read, 0x30)); 622 data_value = 623 (data_read[if_id] == 0) ? 
(1 << 11) : 0; 624 CHECK_STATUS(ddr3_tip_if_write 625 (dev_num, access_type, if_id, 626 DUNIT_CONTROL_HIGH_REG, data_value, 627 (1 << 11))); 628 629 /* Set Active control for ODT write transactions */ 630 if (cs_num == 1) 631 odt_config = g_odt_config_1cs; 632 CHECK_STATUS(ddr3_tip_if_write 633 (dev_num, ACCESS_TYPE_MULTICAST, 634 PARAM_NOT_CARE, 0x1494, odt_config, 635 MASK_ALL_BITS)); 636 } 637 } else { 638 } 639 640 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 641 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 642 CHECK_STATUS(ddr3_tip_rank_control(dev_num, if_id)); 643 644 if (init_cntr_prm->do_mrs_phy) { 645 CHECK_STATUS(ddr3_tip_pad_inv(dev_num, if_id)); 646 } 647 648 /* Pad calibration control - disable */ 649 CHECK_STATUS(ddr3_tip_if_write 650 (dev_num, access_type, if_id, 651 CALIB_MACHINE_CTRL_REG, 0x0, 0x1)); 652 CHECK_STATUS(ddr3_tip_if_write 653 (dev_num, access_type, if_id, 654 CALIB_MACHINE_CTRL_REG, 655 calibration_update_control << 3, 0x3 << 3)); 656 } 657 658 CHECK_STATUS(ddr3_tip_enable_init_sequence(dev_num)); 659 660 if (delay_enable != 0) { 661 adll_tap = MEGA / (freq_val[freq] * 64); 662 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 663 } 664 665 return MV_OK; 666 } 667 668 /* 669 * Load Topology map 670 */ 671 int hws_ddr3_tip_load_topology_map(u32 dev_num, struct hws_topology_map *tm) 672 { 673 enum hws_speed_bin speed_bin_index; 674 enum hws_ddr_freq freq = DDR_FREQ_LIMIT; 675 u32 if_id; 676 677 freq_val[DDR_FREQ_LOW_FREQ] = dfs_low_freq; 678 tm = ddr3_get_topology_map(); 679 CHECK_STATUS(ddr3_tip_get_first_active_if 680 ((u8)dev_num, tm->if_act_mask, 681 &first_active_if)); 682 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 683 ("board IF_Mask=0x%x num_of_bus_per_interface=0x%x\n", 684 tm->if_act_mask, 685 tm->num_of_bus_per_interface)); 686 687 /* 688 * if CL, CWL values are missing in topology map, then fill them 689 * according to speedbin tables 690 */ 691 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 692 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 693 speed_bin_index = 694 tm->interface_params[if_id].speed_bin_index; 695 /* TBD memory frequency of interface 0 only is used ! */ 696 freq = tm->interface_params[first_active_if].memory_freq; 697 698 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 699 ("speed_bin_index =%d freq=%d cl=%d cwl=%d\n", 700 speed_bin_index, freq_val[freq], 701 tm->interface_params[if_id]. 702 cas_l, 703 tm->interface_params[if_id]. 704 cas_wl)); 705 706 if (tm->interface_params[if_id].cas_l == 0) { 707 tm->interface_params[if_id].cas_l = 708 cas_latency_table[speed_bin_index].cl_val[freq]; 709 } 710 711 if (tm->interface_params[if_id].cas_wl == 0) { 712 tm->interface_params[if_id].cas_wl = 713 cas_write_latency_table[speed_bin_index].cl_val[freq]; 714 } 715 } 716 717 return MV_OK; 718 } 719 720 /* 721 * RANK Control Flow 722 */ 723 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id) 724 { 725 u32 data_value = 0, bus_cnt; 726 struct hws_topology_map *tm = ddr3_get_topology_map(); 727 728 for (bus_cnt = 1; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 729 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 730 if ((tm->interface_params[if_id]. 731 as_bus_params[0].cs_bitmask != 732 tm->interface_params[if_id]. 733 as_bus_params[bus_cnt].cs_bitmask) || 734 (tm->interface_params[if_id]. 735 as_bus_params[0].mirror_enable_bitmask != 736 tm->interface_params[if_id]. 
737 as_bus_params[bus_cnt].mirror_enable_bitmask)) 738 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 739 ("WARNING:Wrong configuration for pup #%d CS mask and CS mirroring for all pups should be the same\n", 740 bus_cnt)); 741 } 742 743 data_value |= tm->interface_params[if_id]. 744 as_bus_params[0].cs_bitmask; 745 data_value |= tm->interface_params[if_id]. 746 as_bus_params[0].mirror_enable_bitmask << 4; 747 748 CHECK_STATUS(ddr3_tip_if_write 749 (dev_num, ACCESS_TYPE_UNICAST, if_id, RANK_CTRL_REG, 750 data_value, 0xff)); 751 752 return MV_OK; 753 } 754 755 /* 756 * PAD Inverse Flow 757 */ 758 static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id) 759 { 760 u32 bus_cnt, data_value, ck_swap_pup_ctrl; 761 struct hws_topology_map *tm = ddr3_get_topology_map(); 762 763 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 764 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 765 if (tm->interface_params[if_id]. 766 as_bus_params[bus_cnt].is_dqs_swap == 1) { 767 /* dqs swap */ 768 ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST, 769 if_id, bus_cnt, 770 DDR_PHY_DATA, 771 PHY_CONTROL_PHY_REG, 0xc0, 772 0xc0); 773 } 774 775 if (tm->interface_params[if_id]. 776 as_bus_params[bus_cnt].is_ck_swap == 1) { 777 if (bus_cnt <= 1) 778 data_value = 0x5 << 2; 779 else 780 data_value = 0xa << 2; 781 782 /* mask equals data */ 783 /* ck swap pup is only control pup #0 ! */ 784 ck_swap_pup_ctrl = 0; 785 ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST, 786 if_id, ck_swap_pup_ctrl, 787 DDR_PHY_CONTROL, 788 PHY_CONTROL_PHY_REG, 789 data_value, data_value); 790 } 791 } 792 793 return MV_OK; 794 } 795 796 /* 797 * Run Training Flow 798 */ 799 int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type) 800 { 801 int ret = MV_OK, ret_tune = MV_OK; 802 803 #ifdef ODT_TEST_SUPPORT 804 if (finger_test == 1) 805 return odt_test(dev_num, algo_type); 806 #endif 807 808 if (algo_type == ALGO_TYPE_DYNAMIC) { 809 ret = ddr3_tip_ddr3_auto_tune(dev_num); 810 } else { 811 } 812 813 if (ret != MV_OK) { 814 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 815 ("Run_alg: tuning failed %d\n", ret_tune)); 816 } 817 818 return ret; 819 } 820 821 #ifdef ODT_TEST_SUPPORT 822 /* 823 * ODT Test 824 */ 825 static int odt_test(u32 dev_num, enum hws_algo_type algo_type) 826 { 827 int ret = MV_OK, ret_tune = MV_OK; 828 int pfinger_val = 0, nfinger_val; 829 830 for (pfinger_val = p_finger_start; pfinger_val <= p_finger_end; 831 pfinger_val += p_finger_step) { 832 for (nfinger_val = n_finger_start; nfinger_val <= n_finger_end; 833 nfinger_val += n_finger_step) { 834 if (finger_test != 0) { 835 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 836 ("pfinger_val %d nfinger_val %d\n", 837 pfinger_val, nfinger_val)); 838 p_finger = pfinger_val; 839 n_finger = nfinger_val; 840 } 841 842 if (algo_type == ALGO_TYPE_DYNAMIC) { 843 ret = ddr3_tip_ddr3_auto_tune(dev_num); 844 } else { 845 /* 846 * Frequency per interface is not relevant, 847 * only interface 0 848 */ 849 ret = ddr3_tip_run_static_alg(dev_num, 850 init_freq); 851 } 852 } 853 } 854 855 if (ret_tune != MV_OK) { 856 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 857 ("Run_alg: tuning failed %d\n", ret_tune)); 858 ret = (ret == MV_OK) ? ret_tune : ret; 859 } 860 861 return ret; 862 } 863 #endif 864 865 /* 866 * Select Controller 867 */ 868 int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable) 869 { 870 if (config_func_info[dev_num].tip_dunit_mux_select_func != NULL) { 871 return config_func_info[dev_num]. 
872 tip_dunit_mux_select_func((u8)dev_num, enable); 873 } 874 875 return MV_FAIL; 876 } 877 878 /* 879 * Dunit Register Write 880 */ 881 int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access, 882 u32 if_id, u32 reg_addr, u32 data_value, u32 mask) 883 { 884 if (config_func_info[dev_num].tip_dunit_write_func != NULL) { 885 return config_func_info[dev_num]. 886 tip_dunit_write_func((u8)dev_num, interface_access, 887 if_id, reg_addr, 888 data_value, mask); 889 } 890 891 return MV_FAIL; 892 } 893 894 /* 895 * Dunit Register Read 896 */ 897 int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access, 898 u32 if_id, u32 reg_addr, u32 *data, u32 mask) 899 { 900 if (config_func_info[dev_num].tip_dunit_read_func != NULL) { 901 return config_func_info[dev_num]. 902 tip_dunit_read_func((u8)dev_num, interface_access, 903 if_id, reg_addr, 904 data, mask); 905 } 906 907 return MV_FAIL; 908 } 909 910 /* 911 * Dunit Register Polling 912 */ 913 int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type, 914 u32 if_id, u32 exp_value, u32 mask, u32 offset, 915 u32 poll_tries) 916 { 917 u32 poll_cnt = 0, interface_num = 0, start_if, end_if; 918 u32 read_data[MAX_INTERFACE_NUM]; 919 int ret; 920 int is_fail = 0, is_if_fail; 921 struct hws_topology_map *tm = ddr3_get_topology_map(); 922 923 if (access_type == ACCESS_TYPE_MULTICAST) { 924 start_if = 0; 925 end_if = MAX_INTERFACE_NUM - 1; 926 } else { 927 start_if = if_id; 928 end_if = if_id; 929 } 930 931 for (interface_num = start_if; interface_num <= end_if; interface_num++) { 932 /* polling bit 3 for n times */ 933 VALIDATE_ACTIVE(tm->if_act_mask, interface_num); 934 935 is_if_fail = 0; 936 for (poll_cnt = 0; poll_cnt < poll_tries; poll_cnt++) { 937 ret = 938 ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, 939 interface_num, offset, read_data, 940 mask); 941 if (ret != MV_OK) 942 return ret; 943 944 if (read_data[interface_num] == exp_value) 945 break; 946 } 947 948 if (poll_cnt >= poll_tries) { 949 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 950 ("max poll IF #%d\n", interface_num)); 951 is_fail = 1; 952 is_if_fail = 1; 953 } 954 955 training_result[training_stage][interface_num] = 956 (is_if_fail == 1) ? TEST_FAILED : TEST_SUCCESS; 957 } 958 959 return (is_fail == 0) ? 
MV_OK : MV_FAIL; 960 } 961 962 /* 963 * Bus read access 964 */ 965 int ddr3_tip_bus_read(u32 dev_num, u32 if_id, 966 enum hws_access_type phy_access, u32 phy_id, 967 enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data) 968 { 969 u32 bus_index = 0; 970 u32 data_read[MAX_INTERFACE_NUM]; 971 struct hws_topology_map *tm = ddr3_get_topology_map(); 972 973 if (phy_access == ACCESS_TYPE_MULTICAST) { 974 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 975 bus_index++) { 976 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 977 CHECK_STATUS(ddr3_tip_bus_access 978 (dev_num, ACCESS_TYPE_UNICAST, 979 if_id, ACCESS_TYPE_UNICAST, 980 bus_index, phy_type, reg_addr, 0, 981 OPERATION_READ)); 982 CHECK_STATUS(ddr3_tip_if_read 983 (dev_num, ACCESS_TYPE_UNICAST, if_id, 984 PHY_REG_FILE_ACCESS, data_read, 985 MASK_ALL_BITS)); 986 data[bus_index] = (data_read[if_id] & 0xffff); 987 } 988 } else { 989 CHECK_STATUS(ddr3_tip_bus_access 990 (dev_num, ACCESS_TYPE_UNICAST, if_id, 991 phy_access, phy_id, phy_type, reg_addr, 0, 992 OPERATION_READ)); 993 CHECK_STATUS(ddr3_tip_if_read 994 (dev_num, ACCESS_TYPE_UNICAST, if_id, 995 PHY_REG_FILE_ACCESS, data_read, MASK_ALL_BITS)); 996 997 /* 998 * only 16 lsb bit are valid in Phy (each register is different, 999 * some can actually be less than 16 bits) 1000 */ 1001 *data = (data_read[if_id] & 0xffff); 1002 } 1003 1004 return MV_OK; 1005 } 1006 1007 /* 1008 * Bus write access 1009 */ 1010 int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type interface_access, 1011 u32 if_id, enum hws_access_type phy_access, 1012 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1013 u32 data_value) 1014 { 1015 CHECK_STATUS(ddr3_tip_bus_access 1016 (dev_num, interface_access, if_id, phy_access, 1017 phy_id, phy_type, reg_addr, data_value, OPERATION_WRITE)); 1018 1019 return MV_OK; 1020 } 1021 1022 /* 1023 * Bus access routine (relevant for both read & write) 1024 */ 1025 static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access, 1026 u32 if_id, enum hws_access_type phy_access, 1027 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1028 u32 data_value, enum hws_operation oper_type) 1029 { 1030 u32 addr_low = 0x3f & reg_addr; 1031 u32 addr_hi = ((0xc0 & reg_addr) >> 6); 1032 u32 data_p1 = 1033 (oper_type << 30) + (addr_hi << 28) + (phy_access << 27) + 1034 (phy_type << 26) + (phy_id << 22) + (addr_low << 16) + 1035 (data_value & 0xffff); 1036 u32 data_p2 = data_p1 + (1 << 31); 1037 u32 start_if, end_if; 1038 struct hws_topology_map *tm = ddr3_get_topology_map(); 1039 1040 CHECK_STATUS(ddr3_tip_if_write 1041 (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS, 1042 data_p1, MASK_ALL_BITS)); 1043 CHECK_STATUS(ddr3_tip_if_write 1044 (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS, 1045 data_p2, MASK_ALL_BITS)); 1046 1047 if (interface_access == ACCESS_TYPE_UNICAST) { 1048 start_if = if_id; 1049 end_if = if_id; 1050 } else { 1051 start_if = 0; 1052 end_if = MAX_INTERFACE_NUM - 1; 1053 } 1054 1055 /* polling for read/write execution done */ 1056 for (if_id = start_if; if_id <= end_if; if_id++) { 1057 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1058 CHECK_STATUS(is_bus_access_done 1059 (dev_num, if_id, PHY_REG_FILE_ACCESS, 31)); 1060 } 1061 1062 return MV_OK; 1063 } 1064 1065 /* 1066 * Check bus access done 1067 */ 1068 static int is_bus_access_done(u32 dev_num, u32 if_id, u32 dunit_reg_adrr, 1069 u32 bit) 1070 { 1071 u32 rd_data = 1; 1072 u32 cnt = 0; 1073 u32 data_read[MAX_INTERFACE_NUM]; 1074 1075 CHECK_STATUS(ddr3_tip_if_read 1076 (dev_num, 
ACCESS_TYPE_UNICAST, if_id, dunit_reg_adrr, 1077 data_read, MASK_ALL_BITS)); 1078 rd_data = data_read[if_id]; 1079 rd_data &= (1 << bit); 1080 1081 while (rd_data != 0) { 1082 if (cnt++ >= MAX_POLLING_ITERATIONS) 1083 break; 1084 1085 CHECK_STATUS(ddr3_tip_if_read 1086 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1087 dunit_reg_adrr, data_read, MASK_ALL_BITS)); 1088 rd_data = data_read[if_id]; 1089 rd_data &= (1 << bit); 1090 } 1091 1092 if (cnt < MAX_POLLING_ITERATIONS) 1093 return MV_OK; 1094 else 1095 return MV_FAIL; 1096 } 1097 1098 /* 1099 * Phy read-modify-write 1100 */ 1101 int ddr3_tip_bus_read_modify_write(u32 dev_num, enum hws_access_type access_type, 1102 u32 interface_id, u32 phy_id, 1103 enum hws_ddr_phy phy_type, u32 reg_addr, 1104 u32 data_value, u32 reg_mask) 1105 { 1106 u32 data_val = 0, if_id, start_if, end_if; 1107 struct hws_topology_map *tm = ddr3_get_topology_map(); 1108 1109 if (access_type == ACCESS_TYPE_MULTICAST) { 1110 start_if = 0; 1111 end_if = MAX_INTERFACE_NUM - 1; 1112 } else { 1113 start_if = interface_id; 1114 end_if = interface_id; 1115 } 1116 1117 for (if_id = start_if; if_id <= end_if; if_id++) { 1118 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1119 CHECK_STATUS(ddr3_tip_bus_read 1120 (dev_num, if_id, ACCESS_TYPE_UNICAST, phy_id, 1121 phy_type, reg_addr, &data_val)); 1122 data_value = (data_val & (~reg_mask)) | (data_value & reg_mask); 1123 CHECK_STATUS(ddr3_tip_bus_write 1124 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1125 ACCESS_TYPE_UNICAST, phy_id, phy_type, reg_addr, 1126 data_value)); 1127 } 1128 1129 return MV_OK; 1130 } 1131 1132 /* 1133 * ADLL Calibration 1134 */ 1135 int adll_calibration(u32 dev_num, enum hws_access_type access_type, 1136 u32 if_id, enum hws_ddr_freq frequency) 1137 { 1138 struct hws_tip_freq_config_info freq_config_info; 1139 u32 bus_cnt = 0; 1140 struct hws_topology_map *tm = ddr3_get_topology_map(); 1141 1142 /* Reset Diver_b assert -> de-assert */ 1143 CHECK_STATUS(ddr3_tip_if_write 1144 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1145 0, 0x10000000)); 1146 mdelay(10); 1147 CHECK_STATUS(ddr3_tip_if_write 1148 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1149 0x10000000, 0x10000000)); 1150 1151 if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) { 1152 CHECK_STATUS(config_func_info[dev_num]. 
1153 tip_get_freq_config_info_func((u8)dev_num, frequency, 1154 &freq_config_info)); 1155 } else { 1156 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1157 ("tip_get_freq_config_info_func is NULL")); 1158 return MV_NOT_INITIALIZED; 1159 } 1160 1161 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) { 1162 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 1163 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1164 (dev_num, access_type, if_id, bus_cnt, 1165 DDR_PHY_DATA, BW_PHY_REG, 1166 freq_config_info.bw_per_freq << 8, 0x700)); 1167 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1168 (dev_num, access_type, if_id, bus_cnt, 1169 DDR_PHY_DATA, RATE_PHY_REG, 1170 freq_config_info.rate_per_freq, 0x7)); 1171 } 1172 1173 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1174 CHECK_STATUS(ddr3_tip_if_write 1175 (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION, 1176 0, (0x80000000 | 0x40000000))); 1177 mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ])); 1178 CHECK_STATUS(ddr3_tip_if_write 1179 (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION, 1180 (0x80000000 | 0x40000000), (0x80000000 | 0x40000000))); 1181 1182 /* polling for ADLL Done */ 1183 if (ddr3_tip_if_polling(dev_num, access_type, if_id, 1184 0x3ff03ff, 0x3ff03ff, PHY_LOCK_STATUS_REG, 1185 MAX_POLLING_ITERATIONS) != MV_OK) { 1186 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1187 ("Freq_set: DDR3 poll failed(1)")); 1188 } 1189 1190 /* pup data_pup reset assert-> deassert */ 1191 CHECK_STATUS(ddr3_tip_if_write 1192 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1193 0, 0x60000000)); 1194 mdelay(10); 1195 CHECK_STATUS(ddr3_tip_if_write 1196 (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG, 1197 0x60000000, 0x60000000)); 1198 1199 return MV_OK; 1200 } 1201 1202 int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type, 1203 u32 if_id, enum hws_ddr_freq frequency) 1204 { 1205 u32 cl_value = 0, cwl_value = 0, mem_mask = 0, val = 0, 1206 bus_cnt = 0, t_hclk = 0, t_wr = 0, 1207 refresh_interval_cnt = 0, cnt_id; 1208 u32 t_ckclk; 1209 u32 t_refi = 0, end_if, start_if; 1210 u32 bus_index = 0; 1211 int is_dll_off = 0; 1212 enum hws_speed_bin speed_bin_index = 0; 1213 struct hws_tip_freq_config_info freq_config_info; 1214 enum hws_result *flow_result = training_result[training_stage]; 1215 u32 adll_tap = 0; 1216 u32 cs_mask[MAX_INTERFACE_NUM]; 1217 struct hws_topology_map *tm = ddr3_get_topology_map(); 1218 1219 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1220 ("dev %d access %d IF %d freq %d\n", dev_num, 1221 access_type, if_id, frequency)); 1222 1223 if (frequency == DDR_FREQ_LOW_FREQ) 1224 is_dll_off = 1; 1225 if (access_type == ACCESS_TYPE_MULTICAST) { 1226 start_if = 0; 1227 end_if = MAX_INTERFACE_NUM - 1; 1228 } else { 1229 start_if = if_id; 1230 end_if = if_id; 1231 } 1232 1233 /* calculate interface cs mask - Oferb 4/11 */ 1234 /* speed bin can be different for each interface */ 1235 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1236 /* cs enable is active low */ 1237 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1238 cs_mask[if_id] = CS_BIT_MASK; 1239 training_result[training_stage][if_id] = TEST_SUCCESS; 1240 ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs, 1241 &cs_mask[if_id]); 1242 } 1243 1244 /* speed bin can be different for each interface */ 1245 /* 1246 * moti b - need to remove the loop for multicas access functions 1247 * and loop the unicast access functions 1248 */ 1249 for (if_id = start_if; if_id <= end_if; if_id++) { 1250 if (IS_ACTIVE(tm->if_act_mask, if_id) == 0) 1251 
continue; 1252 1253 flow_result[if_id] = TEST_SUCCESS; 1254 speed_bin_index = 1255 tm->interface_params[if_id].speed_bin_index; 1256 if (tm->interface_params[if_id].memory_freq == 1257 frequency) { 1258 cl_value = 1259 tm->interface_params[if_id].cas_l; 1260 cwl_value = 1261 tm->interface_params[if_id].cas_wl; 1262 } else { 1263 cl_value = 1264 cas_latency_table[speed_bin_index].cl_val[frequency]; 1265 cwl_value = 1266 cas_write_latency_table[speed_bin_index]. 1267 cl_val[frequency]; 1268 } 1269 1270 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1271 ("Freq_set dev 0x%x access 0x%x if 0x%x freq 0x%x speed %d:\n\t", 1272 dev_num, access_type, if_id, 1273 frequency, speed_bin_index)); 1274 1275 for (cnt_id = 0; cnt_id < DDR_FREQ_LIMIT; cnt_id++) { 1276 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1277 ("%d ", 1278 cas_latency_table[speed_bin_index]. 1279 cl_val[cnt_id])); 1280 } 1281 1282 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("\n")); 1283 mem_mask = 0; 1284 for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES(); 1285 bus_index++) { 1286 VALIDATE_ACTIVE(tm->bus_act_mask, bus_index); 1287 mem_mask |= 1288 tm->interface_params[if_id]. 1289 as_bus_params[bus_index].mirror_enable_bitmask; 1290 } 1291 1292 if (mem_mask != 0) { 1293 /* motib redundant in KW28 */ 1294 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1295 if_id, 1296 CS_ENABLE_REG, 0, 0x8)); 1297 } 1298 1299 /* dll state after exiting SR */ 1300 if (is_dll_off == 1) { 1301 CHECK_STATUS(ddr3_tip_if_write 1302 (dev_num, access_type, if_id, 1303 DFS_REG, 0x1, 0x1)); 1304 } else { 1305 CHECK_STATUS(ddr3_tip_if_write 1306 (dev_num, access_type, if_id, 1307 DFS_REG, 0, 0x1)); 1308 } 1309 1310 CHECK_STATUS(ddr3_tip_if_write 1311 (dev_num, access_type, if_id, 1312 DUNIT_MMASK_REG, 0, 0x1)); 1313 /* DFS - block transactions */ 1314 CHECK_STATUS(ddr3_tip_if_write 1315 (dev_num, access_type, if_id, 1316 DFS_REG, 0x2, 0x2)); 1317 1318 /* disable ODT in case of dll off */ 1319 if (is_dll_off == 1) { 1320 CHECK_STATUS(ddr3_tip_if_write 1321 (dev_num, access_type, if_id, 1322 0x1874, 0, 0x244)); 1323 CHECK_STATUS(ddr3_tip_if_write 1324 (dev_num, access_type, if_id, 1325 0x1884, 0, 0x244)); 1326 CHECK_STATUS(ddr3_tip_if_write 1327 (dev_num, access_type, if_id, 1328 0x1894, 0, 0x244)); 1329 CHECK_STATUS(ddr3_tip_if_write 1330 (dev_num, access_type, if_id, 1331 0x18a4, 0, 0x244)); 1332 } 1333 1334 /* DFS - Enter Self-Refresh */ 1335 CHECK_STATUS(ddr3_tip_if_write 1336 (dev_num, access_type, if_id, DFS_REG, 0x4, 1337 0x4)); 1338 /* polling on self refresh entry */ 1339 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, 1340 if_id, 0x8, 0x8, DFS_REG, 1341 MAX_POLLING_ITERATIONS) != MV_OK) { 1342 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1343 ("Freq_set: DDR3 poll failed on SR entry\n")); 1344 } 1345 1346 /* PLL configuration */ 1347 if (config_func_info[dev_num].tip_set_freq_divider_func != NULL) { 1348 config_func_info[dev_num]. 1349 tip_set_freq_divider_func(dev_num, if_id, 1350 frequency); 1351 } 1352 1353 /* PLL configuration End */ 1354 1355 /* adjust t_refi to new frequency */ 1356 t_refi = (tm->interface_params[if_id].interface_temp == 1357 HWS_TEMP_HIGH) ? 
TREFI_HIGH : TREFI_LOW; 1358 t_refi *= 1000; /*psec */ 1359 1360 /* HCLK in[ps] */ 1361 t_hclk = MEGA / (freq_val[frequency] / 2); 1362 refresh_interval_cnt = t_refi / t_hclk; /* no units */ 1363 val = 0x4000 | refresh_interval_cnt; 1364 CHECK_STATUS(ddr3_tip_if_write 1365 (dev_num, access_type, if_id, 1366 SDRAM_CONFIGURATION_REG, val, 0x7fff)); 1367 1368 /* DFS - CL/CWL/WR parameters after exiting SR */ 1369 CHECK_STATUS(ddr3_tip_if_write 1370 (dev_num, access_type, if_id, DFS_REG, 1371 (cl_mask_table[cl_value] << 8), 0xf00)); 1372 CHECK_STATUS(ddr3_tip_if_write 1373 (dev_num, access_type, if_id, DFS_REG, 1374 (cwl_mask_table[cwl_value] << 12), 0x7000)); 1375 1376 t_ckclk = MEGA / freq_val[frequency]; 1377 t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1378 SPEED_BIN_TWR), 1379 t_ckclk); 1380 1381 CHECK_STATUS(ddr3_tip_if_write 1382 (dev_num, access_type, if_id, DFS_REG, 1383 (twr_mask_table[t_wr + 1] << 16), 0x70000)); 1384 1385 /* Restore original RTT values if returning from DLL OFF mode */ 1386 if (is_dll_off == 1) { 1387 CHECK_STATUS(ddr3_tip_if_write 1388 (dev_num, access_type, if_id, 0x1874, 1389 g_dic | g_rtt_nom, 0x266)); 1390 CHECK_STATUS(ddr3_tip_if_write 1391 (dev_num, access_type, if_id, 0x1884, 1392 g_dic | g_rtt_nom, 0x266)); 1393 CHECK_STATUS(ddr3_tip_if_write 1394 (dev_num, access_type, if_id, 0x1894, 1395 g_dic | g_rtt_nom, 0x266)); 1396 CHECK_STATUS(ddr3_tip_if_write 1397 (dev_num, access_type, if_id, 0x18a4, 1398 g_dic | g_rtt_nom, 0x266)); 1399 } 1400 1401 /* Reset Diver_b assert -> de-assert */ 1402 CHECK_STATUS(ddr3_tip_if_write 1403 (dev_num, access_type, if_id, 1404 SDRAM_CONFIGURATION_REG, 0, 0x10000000)); 1405 mdelay(10); 1406 CHECK_STATUS(ddr3_tip_if_write 1407 (dev_num, access_type, if_id, 1408 SDRAM_CONFIGURATION_REG, 0x10000000, 0x10000000)); 1409 1410 /* Adll configuration function of process and Frequency */ 1411 if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) { 1412 CHECK_STATUS(config_func_info[dev_num]. 1413 tip_get_freq_config_info_func(dev_num, frequency, 1414 &freq_config_info)); 1415 } 1416 /* TBD check milo5 using device ID ? */ 1417 for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); 1418 bus_cnt++) { 1419 VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt); 1420 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1421 (dev_num, ACCESS_TYPE_UNICAST, 1422 if_id, bus_cnt, DDR_PHY_DATA, 1423 0x92, 1424 freq_config_info. 
1425 bw_per_freq << 8 1426 /*freq_mask[dev_num][frequency] << 8 */ 1427 , 0x700)); 1428 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1429 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1430 bus_cnt, DDR_PHY_DATA, 0x94, 1431 freq_config_info.rate_per_freq, 0x7)); 1432 } 1433 1434 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1435 CHECK_STATUS(ddr3_tip_if_write 1436 (dev_num, access_type, if_id, 1437 DRAM_PHY_CONFIGURATION, 0, 1438 (0x80000000 | 0x40000000))); 1439 mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ])); 1440 CHECK_STATUS(ddr3_tip_if_write 1441 (dev_num, access_type, if_id, 1442 DRAM_PHY_CONFIGURATION, (0x80000000 | 0x40000000), 1443 (0x80000000 | 0x40000000))); 1444 1445 /* polling for ADLL Done */ 1446 if (ddr3_tip_if_polling 1447 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3ff03ff, 1448 0x3ff03ff, PHY_LOCK_STATUS_REG, 1449 MAX_POLLING_ITERATIONS) != MV_OK) { 1450 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1451 ("Freq_set: DDR3 poll failed(1)\n")); 1452 } 1453 1454 /* pup data_pup reset assert-> deassert */ 1455 CHECK_STATUS(ddr3_tip_if_write 1456 (dev_num, access_type, if_id, 1457 SDRAM_CONFIGURATION_REG, 0, 0x60000000)); 1458 mdelay(10); 1459 CHECK_STATUS(ddr3_tip_if_write 1460 (dev_num, access_type, if_id, 1461 SDRAM_CONFIGURATION_REG, 0x60000000, 0x60000000)); 1462 1463 /* Set proper timing params before existing Self-Refresh */ 1464 ddr3_tip_set_timing(dev_num, access_type, if_id, frequency); 1465 if (delay_enable != 0) { 1466 adll_tap = MEGA / (freq_val[frequency] * 64); 1467 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 1468 } 1469 1470 /* Exit SR */ 1471 CHECK_STATUS(ddr3_tip_if_write 1472 (dev_num, access_type, if_id, DFS_REG, 0, 1473 0x4)); 1474 if (ddr3_tip_if_polling 1475 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x8, DFS_REG, 1476 MAX_POLLING_ITERATIONS) != MV_OK) { 1477 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1478 ("Freq_set: DDR3 poll failed(2)")); 1479 } 1480 1481 /* Refresh Command */ 1482 CHECK_STATUS(ddr3_tip_if_write 1483 (dev_num, access_type, if_id, 1484 SDRAM_OPERATION_REG, 0x2, 0xf1f)); 1485 if (ddr3_tip_if_polling 1486 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, 1487 SDRAM_OPERATION_REG, MAX_POLLING_ITERATIONS) != MV_OK) { 1488 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1489 ("Freq_set: DDR3 poll failed(3)")); 1490 } 1491 1492 /* Release DFS Block */ 1493 CHECK_STATUS(ddr3_tip_if_write 1494 (dev_num, access_type, if_id, DFS_REG, 0, 1495 0x2)); 1496 /* Controller to MBUS Retry - normal */ 1497 CHECK_STATUS(ddr3_tip_if_write 1498 (dev_num, access_type, if_id, DUNIT_MMASK_REG, 1499 0x1, 0x1)); 1500 1501 /* MRO: Burst Length 8, CL , Auto_precharge 0x16cc */ 1502 val = 1503 ((cl_mask_table[cl_value] & 0x1) << 2) | 1504 ((cl_mask_table[cl_value] & 0xe) << 3); 1505 CHECK_STATUS(ddr3_tip_if_write 1506 (dev_num, access_type, if_id, MR0_REG, 1507 val, (0x7 << 4) | (1 << 2))); 1508 /* MR2: CWL = 10 , Auto Self-Refresh - disable */ 1509 val = (cwl_mask_table[cwl_value] << 3); 1510 /* 1511 * nklein 24.10.13 - should not be here - leave value as set in 1512 * the init configuration val |= (1 << 9); 1513 * val |= ((tm->interface_params[if_id]. 1514 * interface_temp == HWS_TEMP_HIGH) ? 
(1 << 7) : 0); 1515 */ 1516 /* nklein 24.10.13 - see above comment */ 1517 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1518 if_id, MR2_REG, 1519 val, (0x7 << 3))); 1520 1521 /* ODT TIMING */ 1522 val = ((cl_value - cwl_value + 1) << 4) | 1523 ((cl_value - cwl_value + 6) << 8) | 1524 ((cl_value - 1) << 12) | ((cl_value + 6) << 16); 1525 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1526 if_id, ODT_TIMING_LOW, 1527 val, 0xffff0)); 1528 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1529 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1530 if_id, ODT_TIMING_HI_REG, 1531 val, 0xffff)); 1532 1533 /* ODT Active */ 1534 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1535 if_id, 1536 DUNIT_ODT_CONTROL_REG, 1537 0xf, 0xf)); 1538 1539 /* re-write CL */ 1540 val = ((cl_mask_table[cl_value] & 0x1) << 2) | 1541 ((cl_mask_table[cl_value] & 0xe) << 3); 1542 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1543 0, MR0_REG, val, 1544 (0x7 << 4) | (1 << 2))); 1545 1546 /* re-write CWL */ 1547 val = (cwl_mask_table[cwl_value] << 3); 1548 CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MRS2_CMD, 1549 val, (0x7 << 3))); 1550 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1551 0, MR2_REG, val, (0x7 << 3))); 1552 1553 if (mem_mask != 0) { 1554 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1555 if_id, 1556 CS_ENABLE_REG, 1557 1 << 3, 0x8)); 1558 } 1559 } 1560 1561 return MV_OK; 1562 } 1563 1564 /* 1565 * Set ODT values 1566 */ 1567 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 1568 u32 if_id, u32 cl_value, u32 cwl_value) 1569 { 1570 /* ODT TIMING */ 1571 u32 val = (cl_value - cwl_value + 6); 1572 1573 val = ((cl_value - cwl_value + 1) << 4) | ((val & 0xf) << 8) | 1574 (((cl_value - 1) & 0xf) << 12) | 1575 (((cl_value + 6) & 0xf) << 16) | (((val & 0x10) >> 4) << 21); 1576 val |= (((cl_value - 1) >> 4) << 22) | (((cl_value + 6) >> 4) << 23); 1577 1578 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1579 ODT_TIMING_LOW, val, 0xffff0)); 1580 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1581 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1582 ODT_TIMING_HI_REG, val, 0xffff)); 1583 if (odt_additional == 1) { 1584 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1585 if_id, 1586 SDRAM_ODT_CONTROL_HIGH_REG, 1587 0xf, 0xf)); 1588 } 1589 1590 /* ODT Active */ 1591 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1592 DUNIT_ODT_CONTROL_REG, 0xf, 0xf)); 1593 1594 return MV_OK; 1595 } 1596 1597 /* 1598 * Set Timing values for training 1599 */ 1600 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 1601 u32 if_id, enum hws_ddr_freq frequency) 1602 { 1603 u32 t_ckclk = 0, t_ras = 0; 1604 u32 t_rcd = 0, t_rp = 0, t_wr = 0, t_wtr = 0, t_rrd = 0, t_rtp = 0, 1605 t_rfc = 0, t_mod = 0; 1606 u32 val = 0, page_size = 0; 1607 enum hws_speed_bin speed_bin_index; 1608 enum hws_mem_size memory_size = MEM_2G; 1609 struct hws_topology_map *tm = ddr3_get_topology_map(); 1610 1611 speed_bin_index = tm->interface_params[if_id].speed_bin_index; 1612 memory_size = tm->interface_params[if_id].memory_size; 1613 page_size = 1614 (tm->interface_params[if_id].bus_width == 1615 BUS_WIDTH_8) ? page_param[memory_size]. 1616 page_size_8bit : page_param[memory_size].page_size_16bit; 1617 t_ckclk = (MEGA / freq_val[frequency]); 1618 t_rrd = (page_size == 1) ? 
speed_bin_table(speed_bin_index, 1619 SPEED_BIN_TRRD1K) : 1620 speed_bin_table(speed_bin_index, SPEED_BIN_TRRD2K); 1621 t_rrd = GET_MAX_VALUE(t_ckclk * 4, t_rrd); 1622 t_rtp = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index, 1623 SPEED_BIN_TRTP)); 1624 t_wtr = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index, 1625 SPEED_BIN_TWTR)); 1626 t_ras = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1627 SPEED_BIN_TRAS), 1628 t_ckclk); 1629 t_rcd = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1630 SPEED_BIN_TRCD), 1631 t_ckclk); 1632 t_rp = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1633 SPEED_BIN_TRP), 1634 t_ckclk); 1635 t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index, 1636 SPEED_BIN_TWR), 1637 t_ckclk); 1638 t_wtr = TIME_2_CLOCK_CYCLES(t_wtr, t_ckclk); 1639 t_rrd = TIME_2_CLOCK_CYCLES(t_rrd, t_ckclk); 1640 t_rtp = TIME_2_CLOCK_CYCLES(t_rtp, t_ckclk); 1641 t_rfc = TIME_2_CLOCK_CYCLES(rfc_table[memory_size] * 1000, t_ckclk); 1642 t_mod = GET_MAX_VALUE(t_ckclk * 24, 15000); 1643 t_mod = TIME_2_CLOCK_CYCLES(t_mod, t_ckclk); 1644 1645 /* SDRAM Timing Low */ 1646 val = (t_ras & 0xf) | (t_rcd << 4) | (t_rp << 8) | (t_wr << 12) | 1647 (t_wtr << 16) | (((t_ras & 0x30) >> 4) << 20) | (t_rrd << 24) | 1648 (t_rtp << 28); 1649 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1650 SDRAM_TIMING_LOW_REG, val, 0xff3fffff)); 1651 1652 /* SDRAM Timing High */ 1653 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1654 SDRAM_TIMING_HIGH_REG, 1655 t_rfc & 0x7f, 0x7f)); 1656 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1657 SDRAM_TIMING_HIGH_REG, 1658 0x180, 0x180)); 1659 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1660 SDRAM_TIMING_HIGH_REG, 1661 0x600, 0x600)); 1662 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1663 SDRAM_TIMING_HIGH_REG, 1664 0x1800, 0xf800)); 1665 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1666 SDRAM_TIMING_HIGH_REG, 1667 ((t_rfc & 0x380) >> 7) << 16, 0x70000)); 1668 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1669 SDRAM_TIMING_HIGH_REG, 0, 1670 0x380000)); 1671 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1672 SDRAM_TIMING_HIGH_REG, 1673 (t_mod & 0xf) << 25, 0x1e00000)); 1674 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1675 SDRAM_TIMING_HIGH_REG, 1676 (t_mod >> 4) << 30, 0xc0000000)); 1677 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1678 SDRAM_TIMING_HIGH_REG, 1679 0x16000000, 0x1e000000)); 1680 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1681 SDRAM_TIMING_HIGH_REG, 1682 0x40000000, 0xc0000000)); 1683 1684 return MV_OK; 1685 } 1686 1687 /* 1688 * Mode Read 1689 */ 1690 int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info) 1691 { 1692 u32 ret; 1693 1694 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1695 MR0_REG, mode_info->reg_mr0, MASK_ALL_BITS); 1696 if (ret != MV_OK) 1697 return ret; 1698 1699 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1700 MR1_REG, mode_info->reg_mr1, MASK_ALL_BITS); 1701 if (ret != MV_OK) 1702 return ret; 1703 1704 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1705 MR2_REG, mode_info->reg_mr2, MASK_ALL_BITS); 1706 if (ret != MV_OK) 1707 return ret; 1708 1709 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1710 MR3_REG, mode_info->reg_mr2, MASK_ALL_BITS); 1711 if (ret != MV_OK) 1712 return ret; 1713 1714 ret = ddr3_tip_if_read(dev_num, 
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1715 READ_DATA_SAMPLE_DELAY, mode_info->read_data_sample, 1716 MASK_ALL_BITS); 1717 if (ret != MV_OK) 1718 return ret; 1719 1720 ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1721 READ_DATA_READY_DELAY, mode_info->read_data_ready, 1722 MASK_ALL_BITS); 1723 if (ret != MV_OK) 1724 return ret; 1725 1726 return MV_OK; 1727 } 1728 1729 /* 1730 * Get first active IF 1731 */ 1732 int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask, 1733 u32 *interface_id) 1734 { 1735 u32 if_id; 1736 struct hws_topology_map *tm = ddr3_get_topology_map(); 1737 1738 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1739 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1740 if (interface_mask & (1 << if_id)) { 1741 *interface_id = if_id; 1742 break; 1743 } 1744 } 1745 1746 return MV_OK; 1747 } 1748 1749 /* 1750 * Write CS Result 1751 */ 1752 int ddr3_tip_write_cs_result(u32 dev_num, u32 offset) 1753 { 1754 u32 if_id, bus_num, cs_bitmask, data_val, cs_num; 1755 struct hws_topology_map *tm = ddr3_get_topology_map(); 1756 1757 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1758 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1759 for (bus_num = 0; bus_num < tm->num_of_bus_per_interface; 1760 bus_num++) { 1761 VALIDATE_ACTIVE(tm->bus_act_mask, bus_num); 1762 cs_bitmask = 1763 tm->interface_params[if_id]. 1764 as_bus_params[bus_num].cs_bitmask; 1765 if (cs_bitmask != effective_cs) { 1766 cs_num = GET_CS_FROM_MASK(cs_bitmask); 1767 ddr3_tip_bus_read(dev_num, if_id, 1768 ACCESS_TYPE_UNICAST, bus_num, 1769 DDR_PHY_DATA, 1770 offset + 1771 CS_REG_VALUE(effective_cs), 1772 &data_val); 1773 ddr3_tip_bus_write(dev_num, 1774 ACCESS_TYPE_UNICAST, 1775 if_id, 1776 ACCESS_TYPE_UNICAST, 1777 bus_num, DDR_PHY_DATA, 1778 offset + 1779 CS_REG_VALUE(cs_num), 1780 data_val); 1781 } 1782 } 1783 } 1784 1785 return MV_OK; 1786 } 1787 1788 /* 1789 * Write MRS 1790 */ 1791 int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd, 1792 u32 data, u32 mask) 1793 { 1794 u32 if_id, reg; 1795 struct hws_topology_map *tm = ddr3_get_topology_map(); 1796 1797 reg = (cmd == MRS1_CMD) ? 
MR1_REG : MR2_REG; 1798 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1799 PARAM_NOT_CARE, reg, data, mask)); 1800 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1801 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1802 CHECK_STATUS(ddr3_tip_if_write 1803 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1804 SDRAM_OPERATION_REG, 1805 (cs_mask_arr[if_id] << 8) | cmd, 0xf1f)); 1806 } 1807 1808 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1809 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1810 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 1811 0x1f, SDRAM_OPERATION_REG, 1812 MAX_POLLING_ITERATIONS) != MV_OK) { 1813 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1814 ("write_mrs_cmd: Poll cmd fail")); 1815 } 1816 } 1817 1818 return MV_OK; 1819 } 1820 1821 /* 1822 * Reset XSB Read FIFO 1823 */ 1824 int ddr3_tip_reset_fifo_ptr(u32 dev_num) 1825 { 1826 u32 if_id = 0; 1827 1828 /* Configure PHY reset value to 0 in order to "clean" the FIFO */ 1829 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1830 if_id, 0x15c8, 0, 0xff000000)); 1831 /* 1832 * Move PHY to RL mode (only in RL mode the PHY overrides FIFO values 1833 * during FIFO reset) 1834 */ 1835 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1836 if_id, TRAINING_SW_2_REG, 1837 0x1, 0x9)); 1838 /* In order that above configuration will influence the PHY */ 1839 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1840 if_id, 0x15b0, 1841 0x80000000, 0x80000000)); 1842 /* Reset read fifo assertion */ 1843 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1844 if_id, 0x1400, 0, 0x40000000)); 1845 /* Reset read fifo deassertion */ 1846 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1847 if_id, 0x1400, 1848 0x40000000, 0x40000000)); 1849 /* Move PHY back to functional mode */ 1850 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1851 if_id, TRAINING_SW_2_REG, 1852 0x8, 0x9)); 1853 /* Stop training machine */ 1854 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1855 if_id, 0x15b4, 0x10000, 0x10000)); 1856 1857 return MV_OK; 1858 } 1859 1860 /* 1861 * Reset Phy registers 1862 */ 1863 int ddr3_tip_ddr3_reset_phy_regs(u32 dev_num) 1864 { 1865 u32 if_id, phy_id, cs; 1866 struct hws_topology_map *tm = ddr3_get_topology_map(); 1867 1868 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1869 VALIDATE_ACTIVE(tm->if_act_mask, if_id); 1870 for (phy_id = 0; phy_id < tm->num_of_bus_per_interface; 1871 phy_id++) { 1872 VALIDATE_ACTIVE(tm->bus_act_mask, phy_id); 1873 CHECK_STATUS(ddr3_tip_bus_write 1874 (dev_num, ACCESS_TYPE_UNICAST, 1875 if_id, ACCESS_TYPE_UNICAST, 1876 phy_id, DDR_PHY_DATA, 1877 WL_PHY_REG + 1878 CS_REG_VALUE(effective_cs), 1879 phy_reg0_val)); 1880 CHECK_STATUS(ddr3_tip_bus_write 1881 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1882 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1883 RL_PHY_REG + CS_REG_VALUE(effective_cs), 1884 phy_reg2_val)); 1885 CHECK_STATUS(ddr3_tip_bus_write 1886 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1887 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1888 READ_CENTRALIZATION_PHY_REG + 1889 CS_REG_VALUE(effective_cs), phy_reg3_val)); 1890 CHECK_STATUS(ddr3_tip_bus_write 1891 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1892 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1893 WRITE_CENTRALIZATION_PHY_REG + 1894 CS_REG_VALUE(effective_cs), phy_reg3_val)); 1895 } 1896 } 1897 1898 /* Set Receiver Calibration value */ 1899 for (cs = 0; cs < MAX_CS_NUM; cs++) { 1900 /* PHY register 0xdb bits[5:0] - configure to 63 */ 
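		/*
		 * Added note: 63 is simply the full-scale value of the 6-bit
		 * field in bits [5:0], i.e. (1 << 6) - 1; the per-CS
		 * CSN_IOB_VREF_REG receiver-calibration setting is parked at
		 * its maximum as part of this PHY register reset.
		 */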
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, CSN_IOB_VREF_REG(cs), 63));
	}

	return MV_OK;
}

/*
 * Restore Dunit registers
 */
int ddr3_tip_restore_dunit_regs(u32 dev_num)
{
	u32 index_cnt;

	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG,
				       0x1, 0x1));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG,
				       calibration_update_control << 3,
				       0x3 << 3));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE,
				       ODPG_WRITE_READ_MODE_ENABLE_REG,
				       0xffff, MASK_ALL_BITS));

	for (index_cnt = 0; index_cnt < ARRAY_SIZE(odpg_default_value);
	     index_cnt++) {
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      odpg_default_value[index_cnt].reg_addr,
			      odpg_default_value[index_cnt].reg_data,
			      odpg_default_value[index_cnt].reg_mask));
	}

	return MV_OK;
}

/*
 * Auto tune main flow
 */
static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
{
	enum hws_ddr_freq freq = init_freq;
	struct init_cntr_param init_cntr_prm;
	int ret = MV_OK;
	u32 if_id;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

#ifndef EXCLUDE_SWITCH_DEBUG
	if (debug_training == DEBUG_LEVEL_TRACE) {
		CHECK_STATUS(print_device_info((u8)dev_num));
	}
#endif

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		CHECK_STATUS(ddr3_tip_ddr3_reset_phy_regs(dev_num));
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	freq = init_freq;
	if (is_pll_before_init != 0) {
		for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			config_func_info[dev_num].tip_set_freq_divider_func(
				(u8)dev_num, if_id, freq);
		}
	}

	if (is_adll_calib_before_init != 0) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("with adll calib before init\n"));
		adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq);
	}

	if (is_reg_dump != 0) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("Dump before init controller\n"));
		ddr3_tip_reg_dump(dev_num);
	}

	if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) {
		training_stage = INIT_CONTROLLER;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("INIT_CONTROLLER_MASK_BIT\n"));
		init_cntr_prm.do_mrs_phy = 1;
		init_cntr_prm.is_ctrl64_bit = 0;
		init_cntr_prm.init_phy = 1;
		init_cntr_prm.msys_init = 0;
		ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("hws_ddr3_tip_init_controller failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) {
		training_stage = SET_LOW_FREQ;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("SET_LOW_FREQ_MASK_BIT %d\n",
				   freq_val[low_freq]));
		ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST,
					PARAM_NOT_CARE, low_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_freq_set failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & LOAD_PATTERN_MASK_BIT) {
			training_stage = LOAD_PATTERN;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("LOAD_PATTERN_MASK_BIT #%d\n",
					   effective_cs));
			ret = ddr3_tip_load_all_pattern_to_mem(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) {
		training_stage = SET_MEDIUM_FREQ;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("SET_MEDIUM_FREQ_MASK_BIT %d\n",
				   freq_val[medium_freq]));
		ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST,
					PARAM_NOT_CARE, medium_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_freq_set failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & WRITE_LEVELING_MASK_BIT) {
		training_stage = WRITE_LEVELING;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("WRITE_LEVELING_MASK_BIT\n"));
		if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) {
			ret = ddr3_tip_dynamic_write_leveling(dev_num);
		} else {
			/* Use old WL */
			ret = ddr3_tip_legacy_dynamic_write_leveling(dev_num);
		}

		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_write_leveling failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) {
			training_stage = LOAD_PATTERN_2;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("LOAD_PATTERN_2_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_load_all_pattern_to_mem(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	if (mask_tune_func & READ_LEVELING_MASK_BIT) {
		training_stage = READ_LEVELING;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("READ_LEVELING_MASK_BIT\n"));
		if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) {
			ret = ddr3_tip_dynamic_read_leveling(dev_num, medium_freq);
		} else {
			/* Use old RL */
			ret = ddr3_tip_legacy_dynamic_read_leveling(dev_num);
		}

		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_read_leveling failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) {
		training_stage = WRITE_LEVELING_SUPP;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("WRITE_LEVELING_SUPP_MASK_BIT\n"));
		ret =
			ddr3_tip_dynamic_write_leveling_supp(dev_num);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_write_leveling_supp failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & PBS_RX_MASK_BIT) {
			training_stage = PBS_RX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("PBS_RX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_pbs_rx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_pbs_rx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & PBS_TX_MASK_BIT) {
			training_stage = PBS_TX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("PBS_TX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_pbs_tx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_pbs_tx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
		training_stage = SET_TARGET_FREQ;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("SET_TARGET_FREQ_MASK_BIT %d\n",
				   freq_val[tm->
					    interface_params[first_active_if].
					    memory_freq]));
		ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST,
					PARAM_NOT_CARE,
					tm->interface_params[first_active_if].
					memory_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_freq_set failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) {
		training_stage = WRITE_LEVELING_TF;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("WRITE_LEVELING_TF_MASK_BIT\n"));
		ret = ddr3_tip_dynamic_write_leveling(dev_num);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_write_leveling TF failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & LOAD_PATTERN_HIGH_MASK_BIT) {
		training_stage = LOAD_PATTERN_HIGH;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("LOAD_PATTERN_HIGH\n"));
		ret = ddr3_tip_load_all_pattern_to_mem(dev_num);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_load_all_pattern_to_mem failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) {
		training_stage = READ_LEVELING_TF;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("READ_LEVELING_TF_MASK_BIT\n"));
		ret = ddr3_tip_dynamic_read_leveling(dev_num, tm->
						     interface_params[first_active_if].
						     memory_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_read_leveling TF failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & DM_PBS_TX_MASK_BIT) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n"));
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) {
			training_stage = VREF_CALIBRATION;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("VREF\n"));
			ret = ddr3_tip_vref(dev_num);
			if (is_reg_dump != 0) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("VREF Dump\n"));
				ddr3_tip_reg_dump(dev_num);
			}
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_vref failure\n"));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) {
			training_stage = CENTRALIZATION_RX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("CENTRALIZATION_RX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_centralization_rx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_centralization_rx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
			training_stage = WRITE_LEVELING_SUPP_TF;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("WRITE_LEVELING_SUPP_TF_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_dynamic_write_leveling_supp(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_dynamic_write_leveling_supp TF failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
			training_stage = CENTRALIZATION_TX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("CENTRALIZATION_TX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_centralization_tx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_centralization_tx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used later */
	effective_cs = 0;

	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n"));
	/* restore register values */
	CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num));

	if (is_reg_dump != 0)
		ddr3_tip_reg_dump(dev_num);

	return MV_OK;
}

/*
 * DDR3 Dynamic training flow
 */
static int ddr3_tip_ddr3_auto_tune(u32 dev_num)
{
	u32 if_id,
	    stage, ret;
	int is_if_fail = 0, is_auto_tune_fail = 0;

	training_stage = INIT_CONTROLLER;

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		for (stage = 0; stage < MAX_STAGE_LIMIT; stage++)
			training_result[stage][if_id] = NO_TEST_DONE;
	}

	ret = ddr3_tip_ddr3_training_main_flow(dev_num);

	/* activate XSB test */
	if (xsb_validate_type != 0) {
		run_xsb_test(dev_num, xsb_validation_base_address, 1, 1,
			     0x1024);
	}

	if (is_reg_dump != 0)
		ddr3_tip_reg_dump(dev_num);

	/* print log */
	CHECK_STATUS(ddr3_tip_print_log(dev_num, window_mem_addr));

	if (ret != MV_OK) {
		CHECK_STATUS(ddr3_tip_print_stability_log(dev_num));
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		is_if_fail = 0;
		for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) {
			if (training_result[stage][if_id] == TEST_FAILED)
				is_if_fail = 1;
		}
		if (is_if_fail == 1) {
			is_auto_tune_fail = 1;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("Auto Tune failed for IF %d\n",
					   if_id));
		}
	}

	if ((ret == MV_FAIL) || (is_auto_tune_fail == 1))
		return MV_FAIL;
	else
		return MV_OK;
}

/*
 * Enable init sequence
 */
int ddr3_tip_enable_init_sequence(u32 dev_num)
{
	int is_fail = 0;
	u32 if_id = 0, mem_mask = 0, bus_index = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* Enable init sequence */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 0,
				       SDRAM_INIT_CONTROL_REG, 0x1, 0x1));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1,
		     SDRAM_INIT_CONTROL_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("polling failed IF %d\n",
					   if_id));
			is_fail = 1;
			continue;
		}

		mem_mask = 0;
		for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			mem_mask |=
				tm->interface_params[if_id].
				as_bus_params[bus_index].mirror_enable_bitmask;
		}

		if (mem_mask != 0) {
			/* Disable Multi CS */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_MULTICAST,
				      if_id, CS_ENABLE_REG, 1 << 3,
				      1 << 3));
		}
	}

	return (is_fail == 0) ?
		MV_OK : MV_FAIL;
}

int ddr3_tip_register_dq_table(u32 dev_num, u32 *table)
{
	dq_map_table = table;

	return MV_OK;
}

/*
 * Check if pup search is locked
 */
int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode)
{
	u32 bit_start = 0, bit_end = 0, bit_id;

	if (read_mode == RESULT_PER_BIT) {
		bit_start = 0;
		bit_end = BUS_WIDTH_IN_BITS - 1;
	} else {
		bit_start = 0;
		bit_end = 0;
	}

	for (bit_id = bit_start; bit_id <= bit_end; bit_id++) {
		if (GET_LOCK_RESULT(pup_buf[bit_id]) == 0)
			return 0;
	}

	return 1;
}

/*
 * Get minimum buffer value
 */
u8 ddr3_tip_get_buf_min(u8 *buf_ptr)
{
	u8 min_val = 0xff;
	u8 cnt = 0;

	for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) {
		if (buf_ptr[cnt] < min_val)
			min_val = buf_ptr[cnt];
	}

	return min_val;
}

/*
 * Get maximum buffer value
 */
u8 ddr3_tip_get_buf_max(u8 *buf_ptr)
{
	u8 max_val = 0;
	u8 cnt = 0;

	for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) {
		if (buf_ptr[cnt] > max_val)
			max_val = buf_ptr[cnt];
	}

	return max_val;
}

/*
 * The following functions return memory parameters:
 * bus and device width, device size
 */

u32 hws_ddr3_get_bus_width(void)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	return (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) ==
		1) ? 16 : 32;
}

u32 hws_ddr3_get_device_width(u32 if_id)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	return (tm->interface_params[if_id].bus_width ==
		BUS_WIDTH_8) ?
		8 : 16;
}

u32 hws_ddr3_get_device_size(u32 if_id)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (tm->interface_params[if_id].memory_size >=
	    MEM_SIZE_LAST) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Error: Wrong device size of Cs: %d",
				   tm->interface_params[if_id].memory_size));
		return 0;
	} else {
		return 1 << tm->interface_params[if_id].memory_size;
	}
}

int hws_ddr3_calc_mem_cs_size(u32 if_id, u32 cs, u32 *cs_size)
{
	u32 cs_mem_size, dev_size;

	dev_size = hws_ddr3_get_device_size(if_id);
	if (dev_size != 0) {
		cs_mem_size = ((hws_ddr3_get_bus_width() /
				hws_ddr3_get_device_width(if_id)) * dev_size);

		/* the result is expressed in GByte x 16 to avoid using floats */

		if (cs_mem_size == 2) {
			*cs_size = _128M;
		} else if (cs_mem_size == 4) {
			*cs_size = _256M;
		} else if (cs_mem_size == 8) {
			*cs_size = _512M;
		} else if (cs_mem_size == 16) {
			*cs_size = _1G;
		} else if (cs_mem_size == 32) {
			*cs_size = _2G;
		} else {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("Error: Wrong Memory size of Cs: %d", cs));
			return MV_FAIL;
		}
		return MV_OK;
	} else {
		return MV_FAIL;
	}
}

int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr)
{
	u32 cs_mem_size = 0;
#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
#endif

	if (hws_ddr3_calc_mem_cs_size(if_id, cs, &cs_mem_size) != MV_OK)
		return MV_FAIL;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/*
	 * If the number of address pins does not allow using the full memory
	 * size defined in the topology, the size is capped by
	 * DEVICE_MAX_DRAM_ADDRESS_SIZE.
	 */
	physical_mem_size =
		mv_hwsmem_size[tm->interface_params[0].memory_size];

	if (hws_ddr3_get_device_width(cs) == 16) {
		/*
		 * A 16-bit memory device can hold twice as much - the least
		 * significant address pin is not needed.
		 */
		max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
	}

	if (physical_mem_size > max_mem_size) {
		cs_mem_size = max_mem_size *
			(hws_ddr3_get_bus_width() /
			 hws_ddr3_get_device_width(if_id));
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Updated physical mem size from 0x%x to 0x%x\n",
				   physical_mem_size,
				   DEVICE_MAX_DRAM_ADDRESS_SIZE));
	}
#endif

	/* calculate CS base addr */
	*cs_base_addr = ((cs_mem_size) * cs) & 0xffff0000;

	return MV_OK;
}
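/*
 * Worked example (illustrative sketch only; the topology values below are
 * assumed, not taken from a real board): with a 32-bit bus populated by x16
 * devices and memory_size = 3 in the topology, hws_ddr3_get_device_size()
 * returns 1 << 3 = 8, so cs_mem_size = (32 / 16) * 8 = 16. In the GByte x 16
 * encoding above this means 1 GByte per chip select, i.e. _1G. Assuming _1G
 * expands to 0x40000000, hws_ddr3_cs_base_adr_calc() would then place CS1 at
 * (_1G * 1) & 0xffff0000 = 0x40000000:
 *
 *	u32 cs_size, cs_base;
 *
 *	if (hws_ddr3_calc_mem_cs_size(0, 1, &cs_size) == MV_OK &&
 *	    hws_ddr3_cs_base_adr_calc(0, 1, &cs_base) == MV_OK)
 *		printf("CS1 size 0x%x base 0x%x\n", cs_size, cs_base);
 */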