1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Marvell International Ltd. and its affiliates 4 */ 5 6 #include "ddr3_init.h" 7 #include "mv_ddr_common.h" 8 #include "mv_ddr_training_db.h" 9 #include "mv_ddr_regs.h" 10 11 #define GET_CS_FROM_MASK(mask) (cs_mask2_num[mask]) 12 #define CS_CBE_VALUE(cs_num) (cs_cbe_reg[cs_num]) 13 14 u32 window_mem_addr = 0; 15 u32 phy_reg0_val = 0; 16 u32 phy_reg1_val = 8; 17 u32 phy_reg2_val = 0; 18 u32 phy_reg3_val = PARAM_UNDEFINED; 19 enum mv_ddr_freq low_freq = MV_DDR_FREQ_LOW_FREQ; 20 enum mv_ddr_freq medium_freq; 21 u32 debug_dunit = 0; 22 u32 odt_additional = 1; 23 u32 *dq_map_table = NULL; 24 25 /* in case of ddr4 do not run ddr3_tip_write_additional_odt_setting function - mc odt always 'on' 26 * in ddr4 case the terminations are rttWR and rttPARK and the odt must be always 'on' 0x1498 = 0xf 27 */ 28 u32 odt_config = 1; 29 30 u32 nominal_avs; 31 u32 extension_avs; 32 33 u32 is_pll_before_init = 0, is_adll_calib_before_init = 1, is_dfs_in_init = 0; 34 u32 dfs_low_freq; 35 36 u32 g_rtt_nom_cs0, g_rtt_nom_cs1; 37 u8 calibration_update_control; /* 2 external only, 1 is internal only */ 38 39 enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]; 40 enum auto_tune_stage training_stage = INIT_CONTROLLER; 41 u32 finger_test = 0, p_finger_start = 11, p_finger_end = 64, 42 n_finger_start = 11, n_finger_end = 64, 43 p_finger_step = 3, n_finger_step = 3; 44 u32 clamp_tbl[] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }; 45 46 /* Initiate to 0xff, this variable is define by user in debug mode */ 47 u32 mode_2t = 0xff; 48 u32 xsb_validate_type = 0; 49 u32 xsb_validation_base_address = 0xf000; 50 u32 first_active_if = 0; 51 u32 dfs_low_phy1 = 0x1f; 52 u32 multicast_id = 0; 53 int use_broadcast = 0; 54 struct hws_tip_freq_config_info *freq_info_table = NULL; 55 u8 is_cbe_required = 0; 56 u32 debug_mode = 0; 57 u32 delay_enable = 0; 58 int rl_mid_freq_wa = 0; 59 60 u32 effective_cs = 0; 61 62 u32 vref_init_val = 0x4; 63 u32 ck_delay = PARAM_UNDEFINED; 64 65 /* Design guidelines parameters */ 66 u32 g_zpri_data = PARAM_UNDEFINED; /* controller data - P drive strength */ 67 u32 g_znri_data = PARAM_UNDEFINED; /* controller data - N drive strength */ 68 u32 g_zpri_ctrl = PARAM_UNDEFINED; /* controller C/A - P drive strength */ 69 u32 g_znri_ctrl = PARAM_UNDEFINED; /* controller C/A - N drive strength */ 70 71 u32 g_zpodt_data = PARAM_UNDEFINED; /* controller data - P ODT */ 72 u32 g_znodt_data = PARAM_UNDEFINED; /* controller data - N ODT */ 73 u32 g_zpodt_ctrl = PARAM_UNDEFINED; /* controller data - P ODT */ 74 u32 g_znodt_ctrl = PARAM_UNDEFINED; /* controller data - N ODT */ 75 76 u32 g_odt_config = PARAM_UNDEFINED; 77 u32 g_rtt_nom = PARAM_UNDEFINED; 78 u32 g_rtt_wr = PARAM_UNDEFINED; 79 u32 g_dic = PARAM_UNDEFINED; 80 u32 g_rtt_park = PARAM_UNDEFINED; 81 82 u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT | 83 WRITE_LEVELING_MASK_BIT | 84 LOAD_PATTERN_2_MASK_BIT | 85 READ_LEVELING_MASK_BIT | 86 SET_TARGET_FREQ_MASK_BIT | 87 WRITE_LEVELING_TF_MASK_BIT | 88 READ_LEVELING_TF_MASK_BIT | 89 CENTRALIZATION_RX_MASK_BIT | 90 CENTRALIZATION_TX_MASK_BIT); 91 92 static int ddr3_tip_ddr3_training_main_flow(u32 dev_num); 93 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 94 u32 if_id, u32 cl_value, u32 cwl_value); 95 static int ddr3_tip_ddr3_auto_tune(u32 dev_num); 96 97 #ifdef ODT_TEST_SUPPORT 98 static int odt_test(u32 dev_num, enum hws_algo_type algo_type); 99 #endif 100 101 int adll_calibration(u32 dev_num, enum 
hws_access_type access_type, 102 u32 if_id, enum mv_ddr_freq frequency); 103 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 104 u32 if_id, enum mv_ddr_freq frequency); 105 106 static u8 mem_size_config[MV_DDR_DIE_CAP_LAST] = { 107 0x2, /* 512Mbit */ 108 0x3, /* 1Gbit */ 109 0x0, /* 2Gbit */ 110 0x4, /* 4Gbit */ 111 0x5, /* 8Gbit */ 112 0x0, /* TODO: placeholder for 16-Mbit die capacity */ 113 0x0, /* TODO: placeholder for 32-Mbit die capacity */ 114 0x0, /* TODO: placeholder for 12-Mbit die capacity */ 115 0x0 /* TODO: placeholder for 24-Mbit die capacity */ 116 }; 117 118 static u8 cs_mask2_num[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 }; 119 120 static struct reg_data odpg_default_value[] = { 121 {0x1034, 0x38000, MASK_ALL_BITS}, 122 {0x1038, 0x0, MASK_ALL_BITS}, 123 {0x10b0, 0x0, MASK_ALL_BITS}, 124 {0x10b8, 0x0, MASK_ALL_BITS}, 125 {0x10c0, 0x0, MASK_ALL_BITS}, 126 {0x10f0, 0x0, MASK_ALL_BITS}, 127 {0x10f4, 0x0, MASK_ALL_BITS}, 128 {0x10f8, 0xff, MASK_ALL_BITS}, 129 {0x10fc, 0xffff, MASK_ALL_BITS}, 130 {0x1130, 0x0, MASK_ALL_BITS}, 131 {0x1830, 0x2000000, MASK_ALL_BITS}, 132 {0x14d0, 0x0, MASK_ALL_BITS}, 133 {0x14d4, 0x0, MASK_ALL_BITS}, 134 {0x14d8, 0x0, MASK_ALL_BITS}, 135 {0x14dc, 0x0, MASK_ALL_BITS}, 136 {0x1454, 0x0, MASK_ALL_BITS}, 137 {0x1594, 0x0, MASK_ALL_BITS}, 138 {0x1598, 0x0, MASK_ALL_BITS}, 139 {0x159c, 0x0, MASK_ALL_BITS}, 140 {0x15a0, 0x0, MASK_ALL_BITS}, 141 {0x15a4, 0x0, MASK_ALL_BITS}, 142 {0x15a8, 0x0, MASK_ALL_BITS}, 143 {0x15ac, 0x0, MASK_ALL_BITS}, 144 {0x1604, 0x0, MASK_ALL_BITS}, 145 {0x1608, 0x0, MASK_ALL_BITS}, 146 {0x160c, 0x0, MASK_ALL_BITS}, 147 {0x1610, 0x0, MASK_ALL_BITS}, 148 {0x1614, 0x0, MASK_ALL_BITS}, 149 {0x1618, 0x0, MASK_ALL_BITS}, 150 {0x1624, 0x0, MASK_ALL_BITS}, 151 {0x1690, 0x0, MASK_ALL_BITS}, 152 {0x1694, 0x0, MASK_ALL_BITS}, 153 {0x1698, 0x0, MASK_ALL_BITS}, 154 {0x169c, 0x0, MASK_ALL_BITS}, 155 {0x14b8, 0x6f67, MASK_ALL_BITS}, 156 {0x1630, 0x0, MASK_ALL_BITS}, 157 {0x1634, 0x0, MASK_ALL_BITS}, 158 {0x1638, 0x0, MASK_ALL_BITS}, 159 {0x163c, 0x0, MASK_ALL_BITS}, 160 {0x16b0, 0x0, MASK_ALL_BITS}, 161 {0x16b4, 0x0, MASK_ALL_BITS}, 162 {0x16b8, 0x0, MASK_ALL_BITS}, 163 {0x16bc, 0x0, MASK_ALL_BITS}, 164 {0x16c0, 0x0, MASK_ALL_BITS}, 165 {0x16c4, 0x0, MASK_ALL_BITS}, 166 {0x16c8, 0x0, MASK_ALL_BITS}, 167 {0x16cc, 0x1, MASK_ALL_BITS}, 168 {0x16f0, 0x1, MASK_ALL_BITS}, 169 {0x16f4, 0x0, MASK_ALL_BITS}, 170 {0x16f8, 0x0, MASK_ALL_BITS}, 171 {0x16fc, 0x0, MASK_ALL_BITS} 172 }; 173 174 /* MR cmd and addr definitions */ 175 struct mv_ddr_mr_data mr_data[] = { 176 {MRS0_CMD, MR0_REG}, 177 {MRS1_CMD, MR1_REG}, 178 {MRS2_CMD, MR2_REG}, 179 {MRS3_CMD, MR3_REG} 180 }; 181 182 /* inverse pads */ 183 static int ddr3_tip_pad_inv(void) 184 { 185 u32 sphy, data; 186 u32 sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); 187 u32 ck_swap_ctrl_sphy; 188 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 189 190 for (sphy = 0; sphy < sphy_max; sphy++) { 191 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sphy); 192 if (tm->interface_params[0]. 193 as_bus_params[sphy].is_dqs_swap == 1) { 194 data = (INVERT_PAD << INV_PAD4_OFFS | 195 INVERT_PAD << INV_PAD5_OFFS); 196 /* dqs swap */ 197 ddr3_tip_bus_read_modify_write(0, ACCESS_TYPE_UNICAST, 198 0, sphy, 199 DDR_PHY_DATA, 200 PHY_CTRL_PHY_REG, 201 data, data); 202 } 203 204 if (tm->interface_params[0].as_bus_params[sphy]. 
205 is_ck_swap == 1 && sphy == 0) { 206 /* TODO: move this code to per platform one */ 207 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X) 208 /* clock swap for both cs0 and cs1 */ 209 data = (INVERT_PAD << INV_PAD2_OFFS | 210 INVERT_PAD << INV_PAD6_OFFS | 211 INVERT_PAD << INV_PAD4_OFFS | 212 INVERT_PAD << INV_PAD5_OFFS); 213 ck_swap_ctrl_sphy = CK_SWAP_CTRL_PHY_NUM; 214 ddr3_tip_bus_read_modify_write(0, ACCESS_TYPE_UNICAST, 215 0, ck_swap_ctrl_sphy, 216 DDR_PHY_CONTROL, 217 PHY_CTRL_PHY_REG, 218 data, data); 219 #else /* !CONFIG_ARMADA_38X && !CONFIG_ARMADA_39X && !A70X0 && !A80X0 && !A3900 */ 220 #pragma message "unknown platform to configure ddr clock swap" 221 #endif 222 } 223 } 224 225 return MV_OK; 226 } 227 228 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id); 229 230 /* 231 * Update global training parameters by data from user 232 */ 233 int ddr3_tip_tune_training_params(u32 dev_num, 234 struct tune_train_params *params) 235 { 236 if (params->ck_delay != PARAM_UNDEFINED) 237 ck_delay = params->ck_delay; 238 if (params->phy_reg3_val != PARAM_UNDEFINED) 239 phy_reg3_val = params->phy_reg3_val; 240 if (params->g_rtt_nom != PARAM_UNDEFINED) 241 g_rtt_nom = params->g_rtt_nom; 242 if (params->g_rtt_wr != PARAM_UNDEFINED) 243 g_rtt_wr = params->g_rtt_wr; 244 if (params->g_dic != PARAM_UNDEFINED) 245 g_dic = params->g_dic; 246 if (params->g_odt_config != PARAM_UNDEFINED) 247 g_odt_config = params->g_odt_config; 248 if (params->g_zpri_data != PARAM_UNDEFINED) 249 g_zpri_data = params->g_zpri_data; 250 if (params->g_znri_data != PARAM_UNDEFINED) 251 g_znri_data = params->g_znri_data; 252 if (params->g_zpri_ctrl != PARAM_UNDEFINED) 253 g_zpri_ctrl = params->g_zpri_ctrl; 254 if (params->g_znri_ctrl != PARAM_UNDEFINED) 255 g_znri_ctrl = params->g_znri_ctrl; 256 if (params->g_zpodt_data != PARAM_UNDEFINED) 257 g_zpodt_data = params->g_zpodt_data; 258 if (params->g_znodt_data != PARAM_UNDEFINED) 259 g_znodt_data = params->g_znodt_data; 260 if (params->g_zpodt_ctrl != PARAM_UNDEFINED) 261 g_zpodt_ctrl = params->g_zpodt_ctrl; 262 if (params->g_znodt_ctrl != PARAM_UNDEFINED) 263 g_znodt_ctrl = params->g_znodt_ctrl; 264 if (params->g_rtt_park != PARAM_UNDEFINED) 265 g_rtt_park = params->g_rtt_park; 266 267 268 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 269 ("DGL parameters: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n", 270 g_zpri_data, g_znri_data, g_zpri_ctrl, g_znri_ctrl, g_zpodt_data, g_znodt_data, 271 g_zpodt_ctrl, g_znodt_ctrl, g_rtt_nom, g_dic, g_odt_config, g_rtt_wr)); 272 273 return MV_OK; 274 } 275 276 /* 277 * Configure CS 278 */ 279 int ddr3_tip_configure_cs(u32 dev_num, u32 if_id, u32 cs_num, u32 enable) 280 { 281 u32 data, addr_hi, data_high; 282 u32 mem_index; 283 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 284 285 if (enable == 1) { 286 data = (tm->interface_params[if_id].bus_width == 287 MV_DDR_DEV_WIDTH_8BIT) ? 
0 : 1; 288 CHECK_STATUS(ddr3_tip_if_write 289 (dev_num, ACCESS_TYPE_UNICAST, if_id, 290 SDRAM_ADDR_CTRL_REG, (data << (cs_num * 4)), 291 0x3 << (cs_num * 4))); 292 mem_index = tm->interface_params[if_id].memory_size; 293 294 addr_hi = mem_size_config[mem_index] & 0x3; 295 CHECK_STATUS(ddr3_tip_if_write 296 (dev_num, ACCESS_TYPE_UNICAST, if_id, 297 SDRAM_ADDR_CTRL_REG, 298 (addr_hi << (2 + cs_num * 4)), 299 0x3 << (2 + cs_num * 4))); 300 301 data_high = (mem_size_config[mem_index] & 0x4) >> 2; 302 CHECK_STATUS(ddr3_tip_if_write 303 (dev_num, ACCESS_TYPE_UNICAST, if_id, 304 SDRAM_ADDR_CTRL_REG, 305 data_high << (20 + cs_num), 1 << (20 + cs_num))); 306 307 /* Enable Address Select Mode */ 308 CHECK_STATUS(ddr3_tip_if_write 309 (dev_num, ACCESS_TYPE_UNICAST, if_id, 310 SDRAM_ADDR_CTRL_REG, 1 << (16 + cs_num), 311 1 << (16 + cs_num))); 312 } 313 switch (cs_num) { 314 case 0: 315 case 1: 316 case 2: 317 CHECK_STATUS(ddr3_tip_if_write 318 (dev_num, ACCESS_TYPE_UNICAST, if_id, 319 DUNIT_CTRL_LOW_REG, (enable << (cs_num + 11)), 320 1 << (cs_num + 11))); 321 break; 322 case 3: 323 CHECK_STATUS(ddr3_tip_if_write 324 (dev_num, ACCESS_TYPE_UNICAST, if_id, 325 DUNIT_CTRL_LOW_REG, (enable << 15), 1 << 15)); 326 break; 327 } 328 329 return MV_OK; 330 } 331 332 /* 333 * Init Controller Flow 334 */ 335 int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_prm) 336 { 337 u32 if_id; 338 u32 cs_num; 339 u32 t_ckclk = 0, t_wr = 0, t2t = 0; 340 u32 data_value = 0, cs_cnt = 0, 341 mem_mask = 0, bus_index = 0; 342 enum mv_ddr_speed_bin speed_bin_index = SPEED_BIN_DDR_2133N; 343 u32 cs_mask = 0; 344 u32 cl_value = 0, cwl_val = 0; 345 u32 bus_cnt = 0, adll_tap = 0; 346 enum hws_access_type access_type = ACCESS_TYPE_UNICAST; 347 u32 data_read[MAX_INTERFACE_NUM]; 348 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 349 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 350 enum mv_ddr_timing timing; 351 enum mv_ddr_freq freq = tm->interface_params[0].memory_freq; 352 353 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 354 ("Init_controller, do_mrs_phy=%d, is_ctrl64_bit=%d\n", 355 init_cntr_prm->do_mrs_phy, 356 init_cntr_prm->is_ctrl64_bit)); 357 358 if (init_cntr_prm->init_phy == 1) { 359 CHECK_STATUS(ddr3_tip_configure_phy(dev_num)); 360 } 361 362 if (generic_init_controller == 1) { 363 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 364 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 365 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 366 ("active IF %d\n", if_id)); 367 mem_mask = 0; 368 for (bus_index = 0; 369 bus_index < octets_per_if_num; 370 bus_index++) { 371 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_index); 372 mem_mask |= 373 tm->interface_params[if_id]. 374 as_bus_params[bus_index].mirror_enable_bitmask; 375 } 376 377 if (mem_mask != 0) { 378 CHECK_STATUS(ddr3_tip_if_write 379 (dev_num, ACCESS_TYPE_MULTICAST, 380 if_id, DUAL_DUNIT_CFG_REG, 0, 381 0x8)); 382 } 383 384 speed_bin_index = 385 tm->interface_params[if_id]. 
386 speed_bin_index; 387 388 /* t_ckclk is external clock */ 389 t_ckclk = (MEGA / mv_ddr_freq_get(freq)); 390 391 if (MV_DDR_IS_HALF_BUS_DRAM_MODE(tm->bus_act_mask, octets_per_if_num)) 392 data_value = (0x4000 | 0 | 0x1000000) & ~(1 << 26); 393 else 394 data_value = (0x4000 | 0x8000 | 0x1000000) & ~(1 << 26); 395 396 /* Interface Bus Width */ 397 /* SRMode */ 398 CHECK_STATUS(ddr3_tip_if_write 399 (dev_num, access_type, if_id, 400 SDRAM_CFG_REG, data_value, 401 0x100c000)); 402 403 /* Interleave first command pre-charge enable (TBD) */ 404 CHECK_STATUS(ddr3_tip_if_write 405 (dev_num, access_type, if_id, 406 SDRAM_OPEN_PAGES_CTRL_REG, (1 << 10), 407 (1 << 10))); 408 409 /* Reset divider_b assert -> de-assert */ 410 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 411 SDRAM_CFG_REG, 412 0x0 << PUP_RST_DIVIDER_OFFS, 413 PUP_RST_DIVIDER_MASK << PUP_RST_DIVIDER_OFFS)); 414 415 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 416 SDRAM_CFG_REG, 417 0x1 << PUP_RST_DIVIDER_OFFS, 418 PUP_RST_DIVIDER_MASK << PUP_RST_DIVIDER_OFFS)); 419 420 /* PHY configuration */ 421 /* 422 * Postamble Length = 1.5cc, Addresscntl to clk skew 423 * \BD, Preamble length normal, parralal ADLL enable 424 */ 425 CHECK_STATUS(ddr3_tip_if_write 426 (dev_num, access_type, if_id, 427 DRAM_PHY_CFG_REG, 0x28, 0x3e)); 428 if (init_cntr_prm->is_ctrl64_bit) { 429 /* positive edge */ 430 CHECK_STATUS(ddr3_tip_if_write 431 (dev_num, access_type, if_id, 432 DRAM_PHY_CFG_REG, 0x0, 433 0xff80)); 434 } 435 436 /* calibration block disable */ 437 /* Xbar Read buffer select (for Internal access) */ 438 CHECK_STATUS(ddr3_tip_if_write 439 (dev_num, access_type, if_id, 440 MAIN_PADS_CAL_MACH_CTRL_REG, 0x1200c, 441 0x7dffe01c)); 442 CHECK_STATUS(ddr3_tip_if_write 443 (dev_num, access_type, if_id, 444 MAIN_PADS_CAL_MACH_CTRL_REG, 445 calibration_update_control << 3, 0x3 << 3)); 446 447 /* Pad calibration control - enable */ 448 CHECK_STATUS(ddr3_tip_if_write 449 (dev_num, access_type, if_id, 450 MAIN_PADS_CAL_MACH_CTRL_REG, 0x1, 0x1)); 451 if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) < MV_TIP_REV_3) { 452 /* DDR3 rank ctrl \96 part of the generic code */ 453 /* CS1 mirroring enable + w/a for JIRA DUNIT-14581 */ 454 CHECK_STATUS(ddr3_tip_if_write 455 (dev_num, access_type, if_id, 456 DDR3_RANK_CTRL_REG, 0x27, MASK_ALL_BITS)); 457 } 458 459 cs_mask = 0; 460 data_value = 0x7; 461 /* 462 * Address ctrl \96 Part of the Generic code 463 * The next configuration is done: 464 * 1) Memory Size 465 * 2) Bus_width 466 * 3) CS# 467 * 4) Page Number 468 * Per Dunit get from the Map_topology the parameters: 469 * Bus_width 470 */ 471 472 data_value = 473 (tm->interface_params[if_id]. 474 bus_width == MV_DDR_DEV_WIDTH_8BIT) ? 0 : 1; 475 476 /* create merge cs mask for all cs available in dunit */ 477 for (bus_cnt = 0; 478 bus_cnt < octets_per_if_num; 479 bus_cnt++) { 480 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); 481 cs_mask |= 482 tm->interface_params[if_id]. 483 as_bus_params[bus_cnt].cs_bitmask; 484 } 485 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 486 ("Init_controller IF %d cs_mask %d\n", 487 if_id, cs_mask)); 488 /* 489 * Configure the next upon the Map Topology \96 If the 490 * Dunit is CS0 Configure CS0 if it is multi CS 491 * configure them both: The Bust_width it\92s the 492 * Memory Bus width \96 x8 or x16 493 */ 494 for (cs_cnt = 0; cs_cnt < MAX_CS_NUM; cs_cnt++) { 495 ddr3_tip_configure_cs(dev_num, if_id, cs_cnt, 496 ((cs_mask & (1 << cs_cnt)) ? 
1 497 : 0)); 498 } 499 500 if (init_cntr_prm->do_mrs_phy) { 501 /* 502 * MR0 \96 Part of the Generic code 503 * The next configuration is done: 504 * 1) Burst Length 505 * 2) CAS Latency 506 * get for each dunit what is it Speed_bin & 507 * Target Frequency. From those both parameters 508 * get the appropriate Cas_l from the CL table 509 */ 510 cl_value = 511 tm->interface_params[if_id]. 512 cas_l; 513 cwl_val = 514 tm->interface_params[if_id]. 515 cas_wl; 516 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 517 ("cl_value 0x%x cwl_val 0x%x\n", 518 cl_value, cwl_val)); 519 520 t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get 521 (speed_bin_index, 522 SPEED_BIN_TWR), t_ckclk); 523 524 data_value = 525 ((cl_mask_table[cl_value] & 0x1) << 2) | 526 ((cl_mask_table[cl_value] & 0xe) << 3); 527 CHECK_STATUS(ddr3_tip_if_write 528 (dev_num, access_type, if_id, 529 MR0_REG, data_value, 530 (0x7 << 4) | (1 << 2))); 531 CHECK_STATUS(ddr3_tip_if_write 532 (dev_num, access_type, if_id, 533 MR0_REG, twr_mask_table[t_wr] << 9, 534 0x7 << 9)); 535 536 /* 537 * MR1: Set RTT and DIC Design GL values 538 * configured by user 539 */ 540 CHECK_STATUS(ddr3_tip_if_write 541 (dev_num, ACCESS_TYPE_MULTICAST, 542 PARAM_NOT_CARE, MR1_REG, 543 g_dic | g_rtt_nom, 0x266)); 544 545 /* MR2 - Part of the Generic code */ 546 /* 547 * The next configuration is done: 548 * 1) SRT 549 * 2) CAS Write Latency 550 */ 551 data_value = (cwl_mask_table[cwl_val] << 3); 552 data_value |= 553 ((tm->interface_params[if_id]. 554 interface_temp == 555 MV_DDR_TEMP_HIGH) ? (1 << 7) : 0); 556 data_value |= g_rtt_wr; 557 CHECK_STATUS(ddr3_tip_if_write 558 (dev_num, access_type, if_id, 559 MR2_REG, data_value, 560 (0x7 << 3) | (0x1 << 7) | (0x3 << 561 9))); 562 } 563 564 ddr3_tip_write_odt(dev_num, access_type, if_id, 565 cl_value, cwl_val); 566 ddr3_tip_set_timing(dev_num, access_type, if_id, freq); 567 568 if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) < MV_TIP_REV_3) { 569 CHECK_STATUS(ddr3_tip_if_write 570 (dev_num, access_type, if_id, 571 DUNIT_CTRL_HIGH_REG, 0x1000119, 572 0x100017F)); 573 } else { 574 CHECK_STATUS(ddr3_tip_if_write 575 (dev_num, access_type, if_id, 576 DUNIT_CTRL_HIGH_REG, 0x600177 | 577 (init_cntr_prm->is_ctrl64_bit ? 578 CPU_INTERJECTION_ENA_SPLIT_ENA << CPU_INTERJECTION_ENA_OFFS : 579 CPU_INTERJECTION_ENA_SPLIT_DIS << CPU_INTERJECTION_ENA_OFFS), 580 0x1600177 | CPU_INTERJECTION_ENA_MASK << 581 CPU_INTERJECTION_ENA_OFFS)); 582 } 583 584 /* reset bit 7 */ 585 CHECK_STATUS(ddr3_tip_if_write 586 (dev_num, access_type, if_id, 587 DUNIT_CTRL_HIGH_REG, 588 (init_cntr_prm->msys_init << 7), (1 << 7))); 589 590 timing = tm->interface_params[if_id].timing; 591 592 if (mode_2t != 0xff) { 593 t2t = mode_2t; 594 } else if (timing != MV_DDR_TIM_DEFAULT) { 595 t2t = (timing == MV_DDR_TIM_2T) ? 1 : 0; 596 } else { 597 /* calculate number of CS (per interface) */ 598 cs_num = mv_ddr_cs_num_get(); 599 t2t = (cs_num == 1) ? 
0 : 1; 600 } 601 602 CHECK_STATUS(ddr3_tip_if_write 603 (dev_num, access_type, if_id, 604 DUNIT_CTRL_LOW_REG, t2t << 3, 605 0x3 << 3)); 606 CHECK_STATUS(ddr3_tip_if_write 607 (dev_num, access_type, if_id, 608 DDR_TIMING_REG, 0x28 << 9, 0x3f << 9)); 609 CHECK_STATUS(ddr3_tip_if_write 610 (dev_num, access_type, if_id, 611 DDR_TIMING_REG, 0xa << 21, 0xff << 21)); 612 613 /* move the block to ddr3_tip_set_timing - end */ 614 /* AUTO_ZQC_TIMING */ 615 CHECK_STATUS(ddr3_tip_if_write 616 (dev_num, access_type, if_id, 617 ZQC_CFG_REG, (AUTO_ZQC_TIMING | (2 << 20)), 618 0x3fffff)); 619 CHECK_STATUS(ddr3_tip_if_read 620 (dev_num, access_type, if_id, 621 DRAM_PHY_CFG_REG, data_read, 0x30)); 622 data_value = 623 (data_read[if_id] == 0) ? (1 << 11) : 0; 624 CHECK_STATUS(ddr3_tip_if_write 625 (dev_num, access_type, if_id, 626 DUNIT_CTRL_HIGH_REG, data_value, 627 (1 << 11))); 628 629 /* Set Active control for ODT write transactions */ 630 CHECK_STATUS(ddr3_tip_if_write 631 (dev_num, ACCESS_TYPE_MULTICAST, 632 PARAM_NOT_CARE, 0x1494, g_odt_config, 633 MASK_ALL_BITS)); 634 635 if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) == MV_TIP_REV_3) { 636 CHECK_STATUS(ddr3_tip_if_write 637 (dev_num, access_type, if_id, 638 0x14a8, 0x900, 0x900)); 639 /* wa: controls control sub-phy outputs floating during self-refresh */ 640 CHECK_STATUS(ddr3_tip_if_write 641 (dev_num, access_type, if_id, 642 0x16d0, 0, 0x8000)); 643 } 644 } 645 } 646 647 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 648 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 649 CHECK_STATUS(ddr3_tip_rank_control(dev_num, if_id)); 650 651 if (init_cntr_prm->do_mrs_phy) 652 ddr3_tip_pad_inv(); 653 654 /* Pad calibration control - disable */ 655 CHECK_STATUS(ddr3_tip_if_write 656 (dev_num, access_type, if_id, 657 MAIN_PADS_CAL_MACH_CTRL_REG, 0x0, 0x1)); 658 CHECK_STATUS(ddr3_tip_if_write 659 (dev_num, access_type, if_id, 660 MAIN_PADS_CAL_MACH_CTRL_REG, 661 calibration_update_control << 3, 0x3 << 3)); 662 } 663 664 665 if (delay_enable != 0) { 666 adll_tap = MEGA / (mv_ddr_freq_get(freq) * 64); 667 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 668 } 669 670 return MV_OK; 671 } 672 673 /* 674 * Rank Control Flow 675 */ 676 static int ddr3_tip_rev2_rank_control(u32 dev_num, u32 if_id) 677 { 678 u32 data_value = 0, bus_cnt = 0; 679 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 680 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 681 682 for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { 683 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); 684 data_value |= tm->interface_params[if_id].as_bus_params[bus_cnt]. 685 cs_bitmask; 686 687 if (tm->interface_params[if_id].as_bus_params[bus_cnt]. 688 mirror_enable_bitmask == 1) { 689 /* 690 * Check mirror_enable_bitmask 691 * If it is enabled, CS + 4 bit in a word to be '1' 692 */ 693 if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. 694 cs_bitmask & 0x1) != 0) { 695 data_value |= tm->interface_params[if_id]. 696 as_bus_params[bus_cnt]. 697 mirror_enable_bitmask << 4; 698 } 699 700 if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. 701 cs_bitmask & 0x2) != 0) { 702 data_value |= tm->interface_params[if_id]. 703 as_bus_params[bus_cnt]. 704 mirror_enable_bitmask << 5; 705 } 706 707 if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. 708 cs_bitmask & 0x4) != 0) { 709 data_value |= tm->interface_params[if_id]. 710 as_bus_params[bus_cnt]. 
711 mirror_enable_bitmask << 6; 712 } 713 714 if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. 715 cs_bitmask & 0x8) != 0) { 716 data_value |= tm->interface_params[if_id]. 717 as_bus_params[bus_cnt]. 718 mirror_enable_bitmask << 7; 719 } 720 } 721 } 722 723 CHECK_STATUS(ddr3_tip_if_write 724 (dev_num, ACCESS_TYPE_UNICAST, if_id, DDR3_RANK_CTRL_REG, 725 data_value, 0xff)); 726 727 return MV_OK; 728 } 729 730 static int ddr3_tip_rev3_rank_control(u32 dev_num, u32 if_id) 731 { 732 u32 data_value = 0, bus_cnt; 733 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 734 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 735 736 for (bus_cnt = 1; bus_cnt < octets_per_if_num; bus_cnt++) { 737 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); 738 if ((tm->interface_params[if_id]. 739 as_bus_params[0].cs_bitmask != 740 tm->interface_params[if_id]. 741 as_bus_params[bus_cnt].cs_bitmask) || 742 (tm->interface_params[if_id]. 743 as_bus_params[0].mirror_enable_bitmask != 744 tm->interface_params[if_id]. 745 as_bus_params[bus_cnt].mirror_enable_bitmask)) 746 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 747 ("WARNING:Wrong configuration for pup #%d CS mask and CS mirroring for all pups should be the same\n", 748 bus_cnt)); 749 } 750 751 data_value |= tm->interface_params[if_id]. 752 as_bus_params[0].cs_bitmask; 753 data_value |= tm->interface_params[if_id]. 754 as_bus_params[0].mirror_enable_bitmask << 4; 755 756 CHECK_STATUS(ddr3_tip_if_write 757 (dev_num, ACCESS_TYPE_UNICAST, if_id, DDR3_RANK_CTRL_REG, 758 data_value, 0xff)); 759 760 return MV_OK; 761 } 762 763 static int ddr3_tip_rank_control(u32 dev_num, u32 if_id) 764 { 765 if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) == MV_TIP_REV_2) 766 return ddr3_tip_rev2_rank_control(dev_num, if_id); 767 else 768 return ddr3_tip_rev3_rank_control(dev_num, if_id); 769 } 770 771 /* 772 * Algorithm Parameters Validation 773 */ 774 int ddr3_tip_validate_algo_var(u32 value, u32 fail_value, char *var_name) 775 { 776 if (value == fail_value) { 777 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 778 ("Error: %s is not initialized (Algo Components Validation)\n", 779 var_name)); 780 return 0; 781 } 782 783 return 1; 784 } 785 786 int ddr3_tip_validate_algo_ptr(void *ptr, void *fail_value, char *ptr_name) 787 { 788 if (ptr == fail_value) { 789 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 790 ("Error: %s is not initialized (Algo Components Validation)\n", 791 ptr_name)); 792 return 0; 793 } 794 795 return 1; 796 } 797 798 int ddr3_tip_validate_algo_components(u8 dev_num) 799 { 800 int status = 1; 801 802 /* Check DGL parameters*/ 803 status &= ddr3_tip_validate_algo_var(ck_delay, PARAM_UNDEFINED, "ck_delay"); 804 status &= ddr3_tip_validate_algo_var(phy_reg3_val, PARAM_UNDEFINED, "phy_reg3_val"); 805 status &= ddr3_tip_validate_algo_var(g_rtt_nom, PARAM_UNDEFINED, "g_rtt_nom"); 806 status &= ddr3_tip_validate_algo_var(g_dic, PARAM_UNDEFINED, "g_dic"); 807 status &= ddr3_tip_validate_algo_var(odt_config, PARAM_UNDEFINED, "odt_config"); 808 status &= ddr3_tip_validate_algo_var(g_zpri_data, PARAM_UNDEFINED, "g_zpri_data"); 809 status &= ddr3_tip_validate_algo_var(g_znri_data, PARAM_UNDEFINED, "g_znri_data"); 810 status &= ddr3_tip_validate_algo_var(g_zpri_ctrl, PARAM_UNDEFINED, "g_zpri_ctrl"); 811 status &= ddr3_tip_validate_algo_var(g_znri_ctrl, PARAM_UNDEFINED, "g_znri_ctrl"); 812 status &= ddr3_tip_validate_algo_var(g_zpodt_data, PARAM_UNDEFINED, "g_zpodt_data"); 813 status &= ddr3_tip_validate_algo_var(g_znodt_data, PARAM_UNDEFINED, 
"g_znodt_data"); 814 status &= ddr3_tip_validate_algo_var(g_zpodt_ctrl, PARAM_UNDEFINED, "g_zpodt_ctrl"); 815 status &= ddr3_tip_validate_algo_var(g_znodt_ctrl, PARAM_UNDEFINED, "g_znodt_ctrl"); 816 817 /* Check functions pointers */ 818 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_dunit_mux_select_func, 819 NULL, "tip_dunit_mux_select_func"); 820 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_dunit_write, 821 NULL, "mv_ddr_dunit_write"); 822 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_dunit_read, 823 NULL, "mv_ddr_dunit_read"); 824 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_phy_write, 825 NULL, "mv_ddr_phy_write"); 826 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_phy_read, 827 NULL, "mv_ddr_phy_read"); 828 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_get_freq_config_info_func, 829 NULL, "tip_get_freq_config_info_func"); 830 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_set_freq_divider_func, 831 NULL, "tip_set_freq_divider_func"); 832 status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_get_clock_ratio, 833 NULL, "tip_get_clock_ratio"); 834 835 status &= ddr3_tip_validate_algo_ptr(dq_map_table, NULL, "dq_map_table"); 836 status &= ddr3_tip_validate_algo_var(dfs_low_freq, 0, "dfs_low_freq"); 837 838 return (status == 1) ? MV_OK : MV_NOT_INITIALIZED; 839 } 840 841 842 int ddr3_pre_algo_config(void) 843 { 844 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 845 846 /* Set Bus3 ECC training mode */ 847 if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) { 848 /* Set Bus3 ECC MUX */ 849 CHECK_STATUS(ddr3_tip_if_write 850 (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, 851 DRAM_PINS_MUX_REG, 0x100, 0x100)); 852 } 853 854 /* Set regular ECC training mode (bus4 and bus 3) */ 855 if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) || 856 (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) || 857 (DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))) { 858 /* Enable ECC Write MUX */ 859 CHECK_STATUS(ddr3_tip_if_write 860 (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, 861 TRAINING_SW_2_REG, 0x100, 0x100)); 862 /* General ECC enable */ 863 CHECK_STATUS(ddr3_tip_if_write 864 (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, 865 SDRAM_CFG_REG, 0x40000, 0x40000)); 866 /* Disable Read Data ECC MUX */ 867 CHECK_STATUS(ddr3_tip_if_write 868 (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, 869 TRAINING_SW_2_REG, 0x0, 0x2)); 870 } 871 872 return MV_OK; 873 } 874 875 int ddr3_post_algo_config(void) 876 { 877 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 878 int status; 879 880 status = ddr3_post_run_alg(); 881 if (MV_OK != status) { 882 printf("DDR3 Post Run Alg - FAILED 0x%x\n", status); 883 return status; 884 } 885 886 /* Un_set ECC training mode */ 887 if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) || 888 (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) || 889 (DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))) { 890 /* Disable ECC Write MUX */ 891 CHECK_STATUS(ddr3_tip_if_write 892 (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, 893 TRAINING_SW_2_REG, 0x0, 0x100)); 894 /* General ECC and Bus3 ECC MUX remains enabled */ 895 } 896 897 return MV_OK; 898 } 899 900 /* 901 * Run Training Flow 902 */ 903 int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type) 904 { 905 int status = MV_OK; 906 907 status = ddr3_pre_algo_config(); 908 if (MV_OK != status) { 909 printf("DDR3 Pre Algo Config - FAILED 0x%x\n", status); 910 return status; 911 } 912 913 #ifdef ODT_TEST_SUPPORT 914 if 
(finger_test == 1) 915 return odt_test(dev_num, algo_type); 916 #endif 917 918 if (algo_type == ALGO_TYPE_DYNAMIC) { 919 status = ddr3_tip_ddr3_auto_tune(dev_num); 920 } 921 922 if (status != MV_OK) { 923 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 924 ("******** DRAM initialization Failed (res 0x%x) ********\n", 925 status)); 926 return status; 927 } 928 929 status = ddr3_post_algo_config(); 930 if (MV_OK != status) { 931 printf("DDR3 Post Algo Config - FAILED 0x%x\n", status); 932 return status; 933 } 934 935 return status; 936 } 937 938 #ifdef ODT_TEST_SUPPORT 939 /* 940 * ODT Test 941 */ 942 static int odt_test(u32 dev_num, enum hws_algo_type algo_type) 943 { 944 int ret = MV_OK, ret_tune = MV_OK; 945 int pfinger_val = 0, nfinger_val; 946 947 for (pfinger_val = p_finger_start; pfinger_val <= p_finger_end; 948 pfinger_val += p_finger_step) { 949 for (nfinger_val = n_finger_start; nfinger_val <= n_finger_end; 950 nfinger_val += n_finger_step) { 951 if (finger_test != 0) { 952 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 953 ("pfinger_val %d nfinger_val %d\n", 954 pfinger_val, nfinger_val)); 955 /* 956 * TODO: need to check the correctness 957 * of the following two lines. 958 */ 959 g_zpodt_data = pfinger_val; 960 g_znodt_data = nfinger_val; 961 } 962 963 if (algo_type == ALGO_TYPE_DYNAMIC) { 964 ret = ddr3_tip_ddr3_auto_tune(dev_num); 965 } 966 } 967 } 968 969 if (ret_tune != MV_OK) { 970 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 971 ("Run_alg: tuning failed %d\n", ret_tune)); 972 ret = (ret == MV_OK) ? ret_tune : ret; 973 } 974 975 return ret; 976 } 977 #endif 978 979 /* 980 * Select Controller 981 */ 982 int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable) 983 { 984 return config_func_info[dev_num]. 985 tip_dunit_mux_select_func((u8)dev_num, enable); 986 } 987 988 /* 989 * Dunit Register Write 990 */ 991 int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access, 992 u32 if_id, u32 reg_addr, u32 data_value, u32 mask) 993 { 994 config_func_info[dev_num].mv_ddr_dunit_write(reg_addr, mask, data_value); 995 996 return MV_OK; 997 } 998 999 /* 1000 * Dunit Register Read 1001 */ 1002 int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access, 1003 u32 if_id, u32 reg_addr, u32 *data, u32 mask) 1004 { 1005 config_func_info[dev_num].mv_ddr_dunit_read(reg_addr, mask, data); 1006 1007 return MV_OK; 1008 } 1009 1010 /* 1011 * Dunit Register Polling 1012 */ 1013 int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type, 1014 u32 if_id, u32 exp_value, u32 mask, u32 offset, 1015 u32 poll_tries) 1016 { 1017 u32 poll_cnt = 0, interface_num = 0, start_if, end_if; 1018 u32 read_data[MAX_INTERFACE_NUM]; 1019 int ret; 1020 int is_fail = 0, is_if_fail; 1021 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1022 1023 if (access_type == ACCESS_TYPE_MULTICAST) { 1024 start_if = 0; 1025 end_if = MAX_INTERFACE_NUM - 1; 1026 } else { 1027 start_if = if_id; 1028 end_if = if_id; 1029 } 1030 1031 for (interface_num = start_if; interface_num <= end_if; interface_num++) { 1032 /* polling bit 3 for n times */ 1033 VALIDATE_IF_ACTIVE(tm->if_act_mask, interface_num); 1034 1035 is_if_fail = 0; 1036 for (poll_cnt = 0; poll_cnt < poll_tries; poll_cnt++) { 1037 ret = 1038 ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, 1039 interface_num, offset, read_data, 1040 mask); 1041 if (ret != MV_OK) 1042 return ret; 1043 1044 if (read_data[interface_num] == exp_value) 1045 break; 1046 } 1047 1048 if (poll_cnt >= poll_tries) { 1049 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1050 ("max poll IF #%d\n", 
interface_num)); 1051 is_fail = 1; 1052 is_if_fail = 1; 1053 } 1054 1055 training_result[training_stage][interface_num] = 1056 (is_if_fail == 1) ? TEST_FAILED : TEST_SUCCESS; 1057 } 1058 1059 return (is_fail == 0) ? MV_OK : MV_FAIL; 1060 } 1061 1062 /* 1063 * Bus read access 1064 */ 1065 int ddr3_tip_bus_read(u32 dev_num, u32 if_id, 1066 enum hws_access_type phy_access, u32 phy_id, 1067 enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data) 1068 { 1069 return config_func_info[dev_num]. 1070 mv_ddr_phy_read(phy_access, phy_id, phy_type, reg_addr, data); 1071 } 1072 1073 /* 1074 * Bus write access 1075 */ 1076 int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type interface_access, 1077 u32 if_id, enum hws_access_type phy_access, 1078 u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, 1079 u32 data_value) 1080 { 1081 return config_func_info[dev_num]. 1082 mv_ddr_phy_write(phy_access, phy_id, phy_type, reg_addr, data_value, OPERATION_WRITE); 1083 } 1084 1085 1086 /* 1087 * Phy read-modify-write 1088 */ 1089 int ddr3_tip_bus_read_modify_write(u32 dev_num, enum hws_access_type access_type, 1090 u32 interface_id, u32 phy_id, 1091 enum hws_ddr_phy phy_type, u32 reg_addr, 1092 u32 data_value, u32 reg_mask) 1093 { 1094 u32 data_val = 0, if_id, start_if, end_if; 1095 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1096 1097 if (access_type == ACCESS_TYPE_MULTICAST) { 1098 start_if = 0; 1099 end_if = MAX_INTERFACE_NUM - 1; 1100 } else { 1101 start_if = interface_id; 1102 end_if = interface_id; 1103 } 1104 1105 for (if_id = start_if; if_id <= end_if; if_id++) { 1106 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1107 CHECK_STATUS(ddr3_tip_bus_read 1108 (dev_num, if_id, ACCESS_TYPE_UNICAST, phy_id, 1109 phy_type, reg_addr, &data_val)); 1110 data_value = (data_val & (~reg_mask)) | (data_value & reg_mask); 1111 CHECK_STATUS(ddr3_tip_bus_write 1112 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1113 ACCESS_TYPE_UNICAST, phy_id, phy_type, reg_addr, 1114 data_value)); 1115 } 1116 1117 return MV_OK; 1118 } 1119 1120 /* 1121 * ADLL Calibration 1122 */ 1123 int adll_calibration(u32 dev_num, enum hws_access_type access_type, 1124 u32 if_id, enum mv_ddr_freq frequency) 1125 { 1126 struct hws_tip_freq_config_info freq_config_info; 1127 u32 bus_cnt = 0; 1128 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 1129 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1130 1131 /* Reset Diver_b assert -> de-assert */ 1132 CHECK_STATUS(ddr3_tip_if_write 1133 (dev_num, access_type, if_id, SDRAM_CFG_REG, 1134 0, 0x10000000)); 1135 mdelay(10); 1136 CHECK_STATUS(ddr3_tip_if_write 1137 (dev_num, access_type, if_id, SDRAM_CFG_REG, 1138 0x10000000, 0x10000000)); 1139 1140 CHECK_STATUS(config_func_info[dev_num]. 
1141 tip_get_freq_config_info_func((u8)dev_num, frequency, 1142 &freq_config_info)); 1143 1144 for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { 1145 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); 1146 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1147 (dev_num, access_type, if_id, bus_cnt, 1148 DDR_PHY_DATA, ADLL_CFG0_PHY_REG, 1149 freq_config_info.bw_per_freq << 8, 0x700)); 1150 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1151 (dev_num, access_type, if_id, bus_cnt, 1152 DDR_PHY_DATA, ADLL_CFG2_PHY_REG, 1153 freq_config_info.rate_per_freq, 0x7)); 1154 } 1155 1156 for (bus_cnt = 0; bus_cnt < DDR_IF_CTRL_SUBPHYS_NUM; bus_cnt++) { 1157 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1158 (dev_num, ACCESS_TYPE_UNICAST, if_id, bus_cnt, 1159 DDR_PHY_CONTROL, ADLL_CFG0_PHY_REG, 1160 freq_config_info.bw_per_freq << 8, 0x700)); 1161 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1162 (dev_num, ACCESS_TYPE_UNICAST, if_id, bus_cnt, 1163 DDR_PHY_CONTROL, ADLL_CFG2_PHY_REG, 1164 freq_config_info.rate_per_freq, 0x7)); 1165 } 1166 1167 /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ 1168 CHECK_STATUS(ddr3_tip_if_write 1169 (dev_num, access_type, if_id, DRAM_PHY_CFG_REG, 1170 0, (0x80000000 | 0x40000000))); 1171 mdelay(100 / (mv_ddr_freq_get(frequency)) / mv_ddr_freq_get(MV_DDR_FREQ_LOW_FREQ)); 1172 CHECK_STATUS(ddr3_tip_if_write 1173 (dev_num, access_type, if_id, DRAM_PHY_CFG_REG, 1174 (0x80000000 | 0x40000000), (0x80000000 | 0x40000000))); 1175 1176 /* polling for ADLL Done */ 1177 if (ddr3_tip_if_polling(dev_num, access_type, if_id, 1178 0x3ff03ff, 0x3ff03ff, PHY_LOCK_STATUS_REG, 1179 MAX_POLLING_ITERATIONS) != MV_OK) { 1180 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1181 ("Freq_set: DDR3 poll failed(1)")); 1182 } 1183 1184 /* pup data_pup reset assert-> deassert */ 1185 CHECK_STATUS(ddr3_tip_if_write 1186 (dev_num, access_type, if_id, SDRAM_CFG_REG, 1187 0, 0x60000000)); 1188 mdelay(10); 1189 CHECK_STATUS(ddr3_tip_if_write 1190 (dev_num, access_type, if_id, SDRAM_CFG_REG, 1191 0x60000000, 0x60000000)); 1192 1193 return MV_OK; 1194 } 1195 1196 int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type, 1197 u32 if_id, enum mv_ddr_freq frequency) 1198 { 1199 u32 cl_value = 0, cwl_value = 0, mem_mask = 0, val = 0, 1200 bus_cnt = 0, t_wr = 0, t_ckclk = 0, 1201 cnt_id; 1202 u32 end_if, start_if; 1203 u32 bus_index = 0; 1204 int is_dll_off = 0; 1205 enum mv_ddr_speed_bin speed_bin_index = 0; 1206 struct hws_tip_freq_config_info freq_config_info; 1207 enum hws_result *flow_result = training_result[training_stage]; 1208 u32 adll_tap = 0; 1209 u32 cs_num; 1210 u32 t2t; 1211 u32 cs_mask[MAX_INTERFACE_NUM]; 1212 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 1213 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1214 unsigned int tclk; 1215 enum mv_ddr_timing timing = tm->interface_params[if_id].timing; 1216 u32 freq = mv_ddr_freq_get(frequency); 1217 1218 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1219 ("dev %d access %d IF %d freq %d\n", dev_num, 1220 access_type, if_id, frequency)); 1221 1222 if (frequency == MV_DDR_FREQ_LOW_FREQ) 1223 is_dll_off = 1; 1224 if (access_type == ACCESS_TYPE_MULTICAST) { 1225 start_if = 0; 1226 end_if = MAX_INTERFACE_NUM - 1; 1227 } else { 1228 start_if = if_id; 1229 end_if = if_id; 1230 } 1231 1232 /* calculate interface cs mask - Oferb 4/11 */ 1233 /* speed bin can be different for each interface */ 1234 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1235 /* cs enable is active low */ 1236 
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1237 cs_mask[if_id] = CS_BIT_MASK; 1238 training_result[training_stage][if_id] = TEST_SUCCESS; 1239 ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs, 1240 &cs_mask[if_id]); 1241 } 1242 1243 /* speed bin can be different for each interface */ 1244 /* 1245 * moti b - need to remove the loop for multicas access functions 1246 * and loop the unicast access functions 1247 */ 1248 for (if_id = start_if; if_id <= end_if; if_id++) { 1249 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1250 1251 flow_result[if_id] = TEST_SUCCESS; 1252 speed_bin_index = 1253 tm->interface_params[if_id].speed_bin_index; 1254 if (tm->interface_params[if_id].memory_freq == 1255 frequency) { 1256 cl_value = 1257 tm->interface_params[if_id].cas_l; 1258 cwl_value = 1259 tm->interface_params[if_id].cas_wl; 1260 } else if (tm->cfg_src == MV_DDR_CFG_SPD) { 1261 tclk = 1000000 / freq; 1262 cl_value = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk); 1263 if (cl_value == 0) { 1264 printf("mv_ddr: unsupported cas latency value found\n"); 1265 return MV_FAIL; 1266 } 1267 cwl_value = mv_ddr_cwl_calc(tclk); 1268 if (cwl_value == 0) { 1269 printf("mv_ddr: unsupported cas write latency value found\n"); 1270 return MV_FAIL; 1271 } 1272 } else { 1273 cl_value = mv_ddr_cl_val_get(speed_bin_index, frequency); 1274 cwl_value = mv_ddr_cwl_val_get(speed_bin_index, frequency); 1275 } 1276 1277 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1278 ("Freq_set dev 0x%x access 0x%x if 0x%x freq 0x%x speed %d:\n\t", 1279 dev_num, access_type, if_id, 1280 frequency, speed_bin_index)); 1281 1282 for (cnt_id = 0; cnt_id < MV_DDR_FREQ_LAST; cnt_id++) { 1283 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, 1284 ("%d ", mv_ddr_cl_val_get(speed_bin_index, cnt_id))); 1285 } 1286 1287 DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("\n")); 1288 mem_mask = 0; 1289 for (bus_index = 0; bus_index < octets_per_if_num; 1290 bus_index++) { 1291 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_index); 1292 mem_mask |= 1293 tm->interface_params[if_id]. 
1294 as_bus_params[bus_index].mirror_enable_bitmask; 1295 } 1296 1297 if (mem_mask != 0) { 1298 /* motib redundent in KW28 */ 1299 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1300 if_id, 1301 DUAL_DUNIT_CFG_REG, 0, 0x8)); 1302 } 1303 1304 /* dll state after exiting SR */ 1305 if (is_dll_off == 1) { 1306 CHECK_STATUS(ddr3_tip_if_write 1307 (dev_num, access_type, if_id, 1308 DFS_REG, 0x1, 0x1)); 1309 } else { 1310 CHECK_STATUS(ddr3_tip_if_write 1311 (dev_num, access_type, if_id, 1312 DFS_REG, 0, 0x1)); 1313 } 1314 1315 CHECK_STATUS(ddr3_tip_if_write 1316 (dev_num, access_type, if_id, 1317 DUNIT_MMASK_REG, 0, 0x1)); 1318 /* DFS - block transactions */ 1319 CHECK_STATUS(ddr3_tip_if_write 1320 (dev_num, access_type, if_id, 1321 DFS_REG, 0x2, 0x2)); 1322 1323 /* disable ODT in case of dll off */ 1324 if (is_dll_off == 1) { 1325 CHECK_STATUS(ddr3_tip_if_write 1326 (dev_num, access_type, if_id, 1327 0x1874, 0, 0x244)); 1328 CHECK_STATUS(ddr3_tip_if_write 1329 (dev_num, access_type, if_id, 1330 0x1884, 0, 0x244)); 1331 CHECK_STATUS(ddr3_tip_if_write 1332 (dev_num, access_type, if_id, 1333 0x1894, 0, 0x244)); 1334 CHECK_STATUS(ddr3_tip_if_write 1335 (dev_num, access_type, if_id, 1336 0x18a4, 0, 0x244)); 1337 } 1338 1339 /* DFS - Enter Self-Refresh */ 1340 CHECK_STATUS(ddr3_tip_if_write 1341 (dev_num, access_type, if_id, DFS_REG, 0x4, 1342 0x4)); 1343 /* polling on self refresh entry */ 1344 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, 1345 if_id, 0x8, 0x8, DFS_REG, 1346 MAX_POLLING_ITERATIONS) != MV_OK) { 1347 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1348 ("Freq_set: DDR3 poll failed on SR entry\n")); 1349 } 1350 1351 /* Calculate 2T mode */ 1352 if (mode_2t != 0xff) { 1353 t2t = mode_2t; 1354 } else if (timing != MV_DDR_TIM_DEFAULT) { 1355 t2t = (timing == MV_DDR_TIM_2T) ? 1 : 0; 1356 } else { 1357 /* Calculate number of CS per interface */ 1358 cs_num = mv_ddr_cs_num_get(); 1359 t2t = (cs_num == 1) ? 
0 : 1; 1360 } 1361 1362 1363 if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_INTERLEAVE_WA) == 1) { 1364 /* Use 1T mode if 1:1 ratio configured */ 1365 if (config_func_info[dev_num].tip_get_clock_ratio(frequency) == 1) { 1366 /* Low freq*/ 1367 CHECK_STATUS(ddr3_tip_if_write 1368 (dev_num, access_type, if_id, 1369 SDRAM_OPEN_PAGES_CTRL_REG, 0x0, 0x3C0)); 1370 t2t = 0; 1371 } else { 1372 /* Middle or target freq */ 1373 CHECK_STATUS(ddr3_tip_if_write 1374 (dev_num, access_type, if_id, 1375 SDRAM_OPEN_PAGES_CTRL_REG, 0x3C0, 0x3C0)); 1376 } 1377 } 1378 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1379 DUNIT_CTRL_LOW_REG, t2t << 3, 0x3 << 3)); 1380 1381 /* PLL configuration */ 1382 config_func_info[dev_num].tip_set_freq_divider_func(dev_num, if_id, 1383 frequency); 1384 1385 /* DFS - CL/CWL/WR parameters after exiting SR */ 1386 CHECK_STATUS(ddr3_tip_if_write 1387 (dev_num, access_type, if_id, DFS_REG, 1388 (cl_mask_table[cl_value] << 8), 0xf00)); 1389 CHECK_STATUS(ddr3_tip_if_write 1390 (dev_num, access_type, if_id, DFS_REG, 1391 (cwl_mask_table[cwl_value] << 12), 0x7000)); 1392 1393 t_ckclk = (MEGA / freq); 1394 t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get 1395 (speed_bin_index, 1396 SPEED_BIN_TWR), t_ckclk); 1397 1398 CHECK_STATUS(ddr3_tip_if_write 1399 (dev_num, access_type, if_id, DFS_REG, 1400 (twr_mask_table[t_wr] << 16), 0x70000)); 1401 1402 /* Restore original RTT values if returning from DLL OFF mode */ 1403 if (is_dll_off == 1) { 1404 CHECK_STATUS(ddr3_tip_if_write 1405 (dev_num, access_type, if_id, 0x1874, 1406 g_dic | g_rtt_nom, 0x266)); 1407 CHECK_STATUS(ddr3_tip_if_write 1408 (dev_num, access_type, if_id, 0x1884, 1409 g_dic | g_rtt_nom, 0x266)); 1410 CHECK_STATUS(ddr3_tip_if_write 1411 (dev_num, access_type, if_id, 0x1894, 1412 g_dic | g_rtt_nom, 0x266)); 1413 CHECK_STATUS(ddr3_tip_if_write 1414 (dev_num, access_type, if_id, 0x18a4, 1415 g_dic | g_rtt_nom, 0x266)); 1416 } 1417 1418 /* Reset divider_b assert -> de-assert */ 1419 CHECK_STATUS(ddr3_tip_if_write 1420 (dev_num, access_type, if_id, 1421 SDRAM_CFG_REG, 0, 0x10000000)); 1422 mdelay(10); 1423 CHECK_STATUS(ddr3_tip_if_write 1424 (dev_num, access_type, if_id, 1425 SDRAM_CFG_REG, 0x10000000, 0x10000000)); 1426 1427 /* ADLL configuration function of process and frequency */ 1428 CHECK_STATUS(config_func_info[dev_num]. 1429 tip_get_freq_config_info_func(dev_num, frequency, 1430 &freq_config_info)); 1431 1432 /* TBD check milo5 using device ID ? */ 1433 for (bus_cnt = 0; bus_cnt < octets_per_if_num; 1434 bus_cnt++) { 1435 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); 1436 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1437 (dev_num, ACCESS_TYPE_UNICAST, 1438 if_id, bus_cnt, DDR_PHY_DATA, 1439 0x92, 1440 freq_config_info. 
1441 bw_per_freq << 8 1442 /*freq_mask[dev_num][frequency] << 8 */ 1443 , 0x700)); 1444 CHECK_STATUS(ddr3_tip_bus_read_modify_write 1445 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1446 bus_cnt, DDR_PHY_DATA, 0x94, 1447 freq_config_info.rate_per_freq, 0x7)); 1448 } 1449 1450 /* Dunit to PHY drive post edge, ADLL reset assert -> de-assert */ 1451 CHECK_STATUS(ddr3_tip_if_write 1452 (dev_num, access_type, if_id, 1453 DRAM_PHY_CFG_REG, 0, 1454 (0x80000000 | 0x40000000))); 1455 mdelay(100 / (freq / mv_ddr_freq_get(MV_DDR_FREQ_LOW_FREQ))); 1456 CHECK_STATUS(ddr3_tip_if_write 1457 (dev_num, access_type, if_id, 1458 DRAM_PHY_CFG_REG, (0x80000000 | 0x40000000), 1459 (0x80000000 | 0x40000000))); 1460 1461 /* polling for ADLL Done */ 1462 if (ddr3_tip_if_polling 1463 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3ff03ff, 1464 0x3ff03ff, PHY_LOCK_STATUS_REG, 1465 MAX_POLLING_ITERATIONS) != MV_OK) { 1466 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1467 ("Freq_set: DDR3 poll failed(1)\n")); 1468 } 1469 1470 /* pup data_pup reset assert-> deassert */ 1471 CHECK_STATUS(ddr3_tip_if_write 1472 (dev_num, access_type, if_id, 1473 SDRAM_CFG_REG, 0, 0x60000000)); 1474 mdelay(10); 1475 CHECK_STATUS(ddr3_tip_if_write 1476 (dev_num, access_type, if_id, 1477 SDRAM_CFG_REG, 0x60000000, 0x60000000)); 1478 1479 /* Set proper timing params before existing Self-Refresh */ 1480 ddr3_tip_set_timing(dev_num, access_type, if_id, frequency); 1481 if (delay_enable != 0) { 1482 adll_tap = (is_dll_off == 1) ? 1000 : (MEGA / (freq * 64)); 1483 ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); 1484 } 1485 1486 /* Exit SR */ 1487 CHECK_STATUS(ddr3_tip_if_write 1488 (dev_num, access_type, if_id, DFS_REG, 0, 1489 0x4)); 1490 if (ddr3_tip_if_polling 1491 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x8, DFS_REG, 1492 MAX_POLLING_ITERATIONS) != MV_OK) { 1493 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1494 ("Freq_set: DDR3 poll failed(2)")); 1495 } 1496 1497 /* Refresh Command */ 1498 CHECK_STATUS(ddr3_tip_if_write 1499 (dev_num, access_type, if_id, 1500 SDRAM_OP_REG, 0x2, 0xf1f)); 1501 if (ddr3_tip_if_polling 1502 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, 1503 SDRAM_OP_REG, MAX_POLLING_ITERATIONS) != MV_OK) { 1504 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1505 ("Freq_set: DDR3 poll failed(3)")); 1506 } 1507 1508 /* Release DFS Block */ 1509 CHECK_STATUS(ddr3_tip_if_write 1510 (dev_num, access_type, if_id, DFS_REG, 0, 1511 0x2)); 1512 /* Controller to MBUS Retry - normal */ 1513 CHECK_STATUS(ddr3_tip_if_write 1514 (dev_num, access_type, if_id, DUNIT_MMASK_REG, 1515 0x1, 0x1)); 1516 1517 /* MRO: Burst Length 8, CL , Auto_precharge 0x16cc */ 1518 val = 1519 ((cl_mask_table[cl_value] & 0x1) << 2) | 1520 ((cl_mask_table[cl_value] & 0xe) << 3); 1521 CHECK_STATUS(ddr3_tip_if_write 1522 (dev_num, access_type, if_id, MR0_REG, 1523 val, (0x7 << 4) | (1 << 2))); 1524 /* MR2: CWL = 10 , Auto Self-Refresh - disable */ 1525 val = (cwl_mask_table[cwl_value] << 3) | g_rtt_wr; 1526 /* 1527 * nklein 24.10.13 - should not be here - leave value as set in 1528 * the init configuration val |= (1 << 9); 1529 * val |= ((tm->interface_params[if_id]. 1530 * interface_temp == MV_DDR_TEMP_HIGH) ? 
(1 << 7) : 0); 1531 */ 1532 /* nklein 24.10.13 - see above comment */ 1533 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1534 if_id, MR2_REG, 1535 val, (0x7 << 3) | (0x3 << 9))); 1536 1537 /* ODT TIMING */ 1538 val = ((cl_value - cwl_value + 1) << 4) | 1539 ((cl_value - cwl_value + 6) << 8) | 1540 ((cl_value - 1) << 12) | ((cl_value + 6) << 16); 1541 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1542 if_id, DDR_ODT_TIMING_LOW_REG, 1543 val, 0xffff0)); 1544 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1545 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1546 if_id, DDR_ODT_TIMING_HIGH_REG, 1547 val, 0xffff)); 1548 1549 /* in case of ddr4 need to set the receiver to odt always 'on' (odt_config = '0') 1550 * in case of ddr3 configure the odt through the timing 1551 */ 1552 if (odt_config != 0) { 1553 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, DUNIT_ODT_CTRL_REG, 0xf, 0xf)); 1554 } 1555 else { 1556 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, DUNIT_ODT_CTRL_REG, 1557 0x30f, 0x30f)); 1558 } 1559 1560 /* re-write CL */ 1561 val = ((cl_mask_table[cl_value] & 0x1) << 2) | 1562 ((cl_mask_table[cl_value] & 0xe) << 3); 1563 1564 CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MR_CMD0, 1565 val, (0x7 << 4) | (0x1 << 2))); 1566 1567 /* re-write CWL */ 1568 val = (cwl_mask_table[cwl_value] << 3) | g_rtt_wr; 1569 CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MR_CMD2, 1570 val, (0x7 << 3) | (0x3 << 9))); 1571 1572 if (mem_mask != 0) { 1573 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1574 if_id, 1575 DUAL_DUNIT_CFG_REG, 1576 1 << 3, 0x8)); 1577 } 1578 } 1579 1580 return MV_OK; 1581 } 1582 1583 /* 1584 * Set ODT values 1585 */ 1586 static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, 1587 u32 if_id, u32 cl_value, u32 cwl_value) 1588 { 1589 /* ODT TIMING */ 1590 u32 val = (cl_value - cwl_value + 6); 1591 1592 val = ((cl_value - cwl_value + 1) << 4) | ((val & 0xf) << 8) | 1593 (((cl_value - 1) & 0xf) << 12) | 1594 (((cl_value + 6) & 0xf) << 16) | (((val & 0x10) >> 4) << 21); 1595 val |= (((cl_value - 1) >> 4) << 22) | (((cl_value + 6) >> 4) << 23); 1596 1597 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1598 DDR_ODT_TIMING_LOW_REG, val, 0xffff0)); 1599 val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); 1600 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1601 DDR_ODT_TIMING_HIGH_REG, val, 0xffff)); 1602 if (odt_additional == 1) { 1603 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, 1604 if_id, 1605 SDRAM_ODT_CTRL_HIGH_REG, 1606 0xf, 0xf)); 1607 } 1608 1609 /* ODT Active */ 1610 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1611 DUNIT_ODT_CTRL_REG, 0xf, 0xf)); 1612 1613 return MV_OK; 1614 } 1615 1616 /* 1617 * Set Timing values for training 1618 */ 1619 static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, 1620 u32 if_id, enum mv_ddr_freq frequency) 1621 { 1622 u32 t_ckclk = 0, t_ras = 0; 1623 u32 t_rcd = 0, t_rp = 0, t_wr = 0, t_wtr = 0, t_rrd = 0, t_rtp = 0, 1624 t_rfc = 0, t_mod = 0, t_r2r = 0x3, t_r2r_high = 0, 1625 t_r2w_w2r = 0x3, t_r2w_w2r_high = 0x1, t_w2w = 0x3; 1626 u32 refresh_interval_cnt, t_hclk, t_refi, t_faw, t_pd, t_xpdll; 1627 u32 val = 0, page_size = 0, mask = 0; 1628 enum mv_ddr_speed_bin speed_bin_index; 1629 enum mv_ddr_die_capacity memory_size = MV_DDR_DIE_CAP_2GBIT; 1630 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1631 u32 freq = mv_ddr_freq_get(frequency); 1632 1633 
speed_bin_index = tm->interface_params[if_id].speed_bin_index; 1634 memory_size = tm->interface_params[if_id].memory_size; 1635 page_size = mv_ddr_page_size_get(tm->interface_params[if_id].bus_width, memory_size); 1636 t_ckclk = (MEGA / freq); 1637 /* HCLK in[ps] */ 1638 t_hclk = MEGA / (freq / config_func_info[dev_num].tip_get_clock_ratio(frequency)); 1639 1640 t_refi = (tm->interface_params[if_id].interface_temp == MV_DDR_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW; 1641 t_refi *= 1000; /* psec */ 1642 refresh_interval_cnt = t_refi / t_hclk; /* no units */ 1643 1644 if (page_size == 1) { 1645 t_faw = mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TFAW1K); 1646 t_faw = time_to_nclk(t_faw, t_ckclk); 1647 t_faw = GET_MAX_VALUE(20, t_faw); 1648 } else { /* page size =2, we do not support page size 0.5k */ 1649 t_faw = mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TFAW2K); 1650 t_faw = time_to_nclk(t_faw, t_ckclk); 1651 t_faw = GET_MAX_VALUE(28, t_faw); 1652 } 1653 1654 t_pd = GET_MAX_VALUE(t_ckclk * 3, mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TPD)); 1655 t_pd = time_to_nclk(t_pd, t_ckclk); 1656 1657 t_xpdll = GET_MAX_VALUE(t_ckclk * 10, mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TXPDLL)); 1658 t_xpdll = time_to_nclk(t_xpdll, t_ckclk); 1659 1660 t_rrd = (page_size == 1) ? mv_ddr_speed_bin_timing_get(speed_bin_index, 1661 SPEED_BIN_TRRD1K) : 1662 mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TRRD2K); 1663 t_rrd = GET_MAX_VALUE(t_ckclk * 4, t_rrd); 1664 t_rtp = GET_MAX_VALUE(t_ckclk * 4, mv_ddr_speed_bin_timing_get(speed_bin_index, 1665 SPEED_BIN_TRTP)); 1666 t_mod = GET_MAX_VALUE(t_ckclk * 12, 15000); 1667 t_wtr = GET_MAX_VALUE(t_ckclk * 4, mv_ddr_speed_bin_timing_get(speed_bin_index, 1668 SPEED_BIN_TWTR)); 1669 t_ras = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, 1670 SPEED_BIN_TRAS), 1671 t_ckclk); 1672 t_rcd = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, 1673 SPEED_BIN_TRCD), 1674 t_ckclk); 1675 t_rp = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, 1676 SPEED_BIN_TRP), 1677 t_ckclk); 1678 t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, 1679 SPEED_BIN_TWR), 1680 t_ckclk); 1681 t_wtr = time_to_nclk(t_wtr, t_ckclk); 1682 t_rrd = time_to_nclk(t_rrd, t_ckclk); 1683 t_rtp = time_to_nclk(t_rtp, t_ckclk); 1684 t_rfc = time_to_nclk(mv_ddr_rfc_get(memory_size) * 1000, t_ckclk); 1685 t_mod = time_to_nclk(t_mod, t_ckclk); 1686 1687 /* SDRAM Timing Low */ 1688 val = (((t_ras - 1) & SDRAM_TIMING_LOW_TRAS_MASK) << SDRAM_TIMING_LOW_TRAS_OFFS) | 1689 (((t_rcd - 1) & SDRAM_TIMING_LOW_TRCD_MASK) << SDRAM_TIMING_LOW_TRCD_OFFS) | 1690 (((t_rcd - 1) >> SDRAM_TIMING_LOW_TRCD_OFFS & SDRAM_TIMING_HIGH_TRCD_MASK) 1691 << SDRAM_TIMING_HIGH_TRCD_OFFS) | 1692 (((t_rp - 1) & SDRAM_TIMING_LOW_TRP_MASK) << SDRAM_TIMING_LOW_TRP_OFFS) | 1693 (((t_rp - 1) >> SDRAM_TIMING_LOW_TRP_MASK & SDRAM_TIMING_HIGH_TRP_MASK) 1694 << SDRAM_TIMING_HIGH_TRP_OFFS) | 1695 (((t_wr - 1) & SDRAM_TIMING_LOW_TWR_MASK) << SDRAM_TIMING_LOW_TWR_OFFS) | 1696 (((t_wtr - 1) & SDRAM_TIMING_LOW_TWTR_MASK) << SDRAM_TIMING_LOW_TWTR_OFFS) | 1697 ((((t_ras - 1) >> 4) & SDRAM_TIMING_LOW_TRAS_HIGH_MASK) << SDRAM_TIMING_LOW_TRAS_HIGH_OFFS) | 1698 (((t_rrd - 1) & SDRAM_TIMING_LOW_TRRD_MASK) << SDRAM_TIMING_LOW_TRRD_OFFS) | 1699 (((t_rtp - 1) & SDRAM_TIMING_LOW_TRTP_MASK) << SDRAM_TIMING_LOW_TRTP_OFFS); 1700 1701 mask = (SDRAM_TIMING_LOW_TRAS_MASK << SDRAM_TIMING_LOW_TRAS_OFFS) | 1702 (SDRAM_TIMING_LOW_TRCD_MASK << SDRAM_TIMING_LOW_TRCD_OFFS) | 1703 
(SDRAM_TIMING_HIGH_TRCD_MASK << SDRAM_TIMING_HIGH_TRCD_OFFS) | 1704 (SDRAM_TIMING_LOW_TRP_MASK << SDRAM_TIMING_LOW_TRP_OFFS) | 1705 (SDRAM_TIMING_HIGH_TRP_MASK << SDRAM_TIMING_HIGH_TRP_OFFS) | 1706 (SDRAM_TIMING_LOW_TWR_MASK << SDRAM_TIMING_LOW_TWR_OFFS) | 1707 (SDRAM_TIMING_LOW_TWTR_MASK << SDRAM_TIMING_LOW_TWTR_OFFS) | 1708 (SDRAM_TIMING_LOW_TRAS_HIGH_MASK << SDRAM_TIMING_LOW_TRAS_HIGH_OFFS) | 1709 (SDRAM_TIMING_LOW_TRRD_MASK << SDRAM_TIMING_LOW_TRRD_OFFS) | 1710 (SDRAM_TIMING_LOW_TRTP_MASK << SDRAM_TIMING_LOW_TRTP_OFFS); 1711 1712 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1713 SDRAM_TIMING_LOW_REG, val, mask)); 1714 1715 /* SDRAM Timing High */ 1716 val = 0; 1717 mask = 0; 1718 1719 val = (((t_rfc - 1) & SDRAM_TIMING_HIGH_TRFC_MASK) << SDRAM_TIMING_HIGH_TRFC_OFFS) | 1720 ((t_r2r & SDRAM_TIMING_HIGH_TR2R_MASK) << SDRAM_TIMING_HIGH_TR2R_OFFS) | 1721 ((t_r2w_w2r & SDRAM_TIMING_HIGH_TR2W_W2R_MASK) << SDRAM_TIMING_HIGH_TR2W_W2R_OFFS) | 1722 ((t_w2w & SDRAM_TIMING_HIGH_TW2W_MASK) << SDRAM_TIMING_HIGH_TW2W_OFFS) | 1723 ((((t_rfc - 1) >> 7) & SDRAM_TIMING_HIGH_TRFC_HIGH_MASK) << SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS) | 1724 ((t_r2r_high & SDRAM_TIMING_HIGH_TR2R_HIGH_MASK) << SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS) | 1725 ((t_r2w_w2r_high & SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK) << SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS) | 1726 (((t_mod - 1) & SDRAM_TIMING_HIGH_TMOD_MASK) << SDRAM_TIMING_HIGH_TMOD_OFFS) | 1727 ((((t_mod - 1) >> 4) & SDRAM_TIMING_HIGH_TMOD_HIGH_MASK) << SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS); 1728 1729 mask = (SDRAM_TIMING_HIGH_TRFC_MASK << SDRAM_TIMING_HIGH_TRFC_OFFS) | 1730 (SDRAM_TIMING_HIGH_TR2R_MASK << SDRAM_TIMING_HIGH_TR2R_OFFS) | 1731 (SDRAM_TIMING_HIGH_TR2W_W2R_MASK << SDRAM_TIMING_HIGH_TR2W_W2R_OFFS) | 1732 (SDRAM_TIMING_HIGH_TW2W_MASK << SDRAM_TIMING_HIGH_TW2W_OFFS) | 1733 (SDRAM_TIMING_HIGH_TRFC_HIGH_MASK << SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS) | 1734 (SDRAM_TIMING_HIGH_TR2R_HIGH_MASK << SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS) | 1735 (SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK << SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS) | 1736 (SDRAM_TIMING_HIGH_TMOD_MASK << SDRAM_TIMING_HIGH_TMOD_OFFS) | 1737 (SDRAM_TIMING_HIGH_TMOD_HIGH_MASK << SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS); 1738 1739 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1740 SDRAM_TIMING_HIGH_REG, val, mask)); 1741 1742 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1743 SDRAM_CFG_REG, 1744 refresh_interval_cnt << REFRESH_OFFS, 1745 REFRESH_MASK << REFRESH_OFFS)); 1746 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, 1747 SDRAM_ADDR_CTRL_REG, (t_faw - 1) << T_FAW_OFFS, 1748 T_FAW_MASK << T_FAW_OFFS)); 1749 1750 CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, DDR_TIMING_REG, 1751 (t_pd - 1) << DDR_TIMING_TPD_OFFS | 1752 (t_xpdll - 1) << DDR_TIMING_TXPDLL_OFFS, 1753 DDR_TIMING_TPD_MASK << DDR_TIMING_TPD_OFFS | 1754 DDR_TIMING_TXPDLL_MASK << DDR_TIMING_TXPDLL_OFFS)); 1755 1756 1757 return MV_OK; 1758 } 1759 1760 1761 /* 1762 * Write CS Result 1763 */ 1764 int ddr3_tip_write_cs_result(u32 dev_num, u32 offset) 1765 { 1766 u32 if_id, bus_num, cs_bitmask, data_val, cs_num; 1767 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 1768 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1769 1770 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1771 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1772 for (bus_num = 0; bus_num < octets_per_if_num; 1773 bus_num++) { 1774 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num); 1775 cs_bitmask = 1776 
tm->interface_params[if_id]. 1777 as_bus_params[bus_num].cs_bitmask; 1778 if (cs_bitmask != effective_cs) { 1779 cs_num = GET_CS_FROM_MASK(cs_bitmask); 1780 ddr3_tip_bus_read(dev_num, if_id, 1781 ACCESS_TYPE_UNICAST, bus_num, 1782 DDR_PHY_DATA, 1783 offset + 1784 (effective_cs * 0x4), 1785 &data_val); 1786 ddr3_tip_bus_write(dev_num, 1787 ACCESS_TYPE_UNICAST, 1788 if_id, 1789 ACCESS_TYPE_UNICAST, 1790 bus_num, DDR_PHY_DATA, 1791 offset + 1792 (cs_num * 0x4), 1793 data_val); 1794 } 1795 } 1796 } 1797 1798 return MV_OK; 1799 } 1800 1801 /* 1802 * Write MRS 1803 */ 1804 int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, enum mr_number mr_num, u32 data, u32 mask) 1805 { 1806 u32 if_id; 1807 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1808 1809 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1810 PARAM_NOT_CARE, mr_data[mr_num].reg_addr, data, mask)); 1811 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1812 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1813 CHECK_STATUS(ddr3_tip_if_write 1814 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1815 SDRAM_OP_REG, 1816 (cs_mask_arr[if_id] << 8) | mr_data[mr_num].cmd, 0xf1f)); 1817 } 1818 1819 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1820 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1821 if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 1822 0x1f, SDRAM_OP_REG, 1823 MAX_POLLING_ITERATIONS) != MV_OK) { 1824 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 1825 ("write_mrs_cmd: Poll cmd fail")); 1826 } 1827 } 1828 1829 return MV_OK; 1830 } 1831 1832 /* 1833 * Reset XSB Read FIFO 1834 */ 1835 int ddr3_tip_reset_fifo_ptr(u32 dev_num) 1836 { 1837 u32 if_id = 0; 1838 1839 /* Configure PHY reset value to 0 in order to "clean" the FIFO */ 1840 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1841 if_id, 0x15c8, 0, 0xff000000)); 1842 /* 1843 * Move PHY to RL mode (only in RL mode the PHY overrides FIFO values 1844 * during FIFO reset) 1845 */ 1846 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1847 if_id, TRAINING_SW_2_REG, 1848 0x1, 0x9)); 1849 /* In order that above configuration will influence the PHY */ 1850 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1851 if_id, 0x15b0, 1852 0x80000000, 0x80000000)); 1853 /* Reset read fifo assertion */ 1854 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1855 if_id, 0x1400, 0, 0x40000000)); 1856 /* Reset read fifo deassertion */ 1857 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1858 if_id, 0x1400, 1859 0x40000000, 0x40000000)); 1860 /* Move PHY back to functional mode */ 1861 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1862 if_id, TRAINING_SW_2_REG, 1863 0x8, 0x9)); 1864 /* Stop training machine */ 1865 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1866 if_id, 0x15b4, 0x10000, 0x10000)); 1867 1868 return MV_OK; 1869 } 1870 1871 /* 1872 * Reset Phy registers 1873 */ 1874 int ddr3_tip_ddr3_reset_phy_regs(u32 dev_num) 1875 { 1876 u32 if_id, phy_id, cs; 1877 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 1878 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1879 1880 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1881 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1882 for (phy_id = 0; phy_id < octets_per_if_num; 1883 phy_id++) { 1884 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id); 1885 CHECK_STATUS(ddr3_tip_bus_write 1886 (dev_num, ACCESS_TYPE_UNICAST, 1887 if_id, ACCESS_TYPE_UNICAST, 1888 
phy_id, DDR_PHY_DATA, 1889 WL_PHY_REG(effective_cs), 1890 phy_reg0_val)); 1891 CHECK_STATUS(ddr3_tip_bus_write 1892 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1893 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1894 RL_PHY_REG(effective_cs), 1895 phy_reg2_val)); 1896 CHECK_STATUS(ddr3_tip_bus_write 1897 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1898 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1899 CRX_PHY_REG(effective_cs), phy_reg3_val)); 1900 CHECK_STATUS(ddr3_tip_bus_write 1901 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1902 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1903 CTX_PHY_REG(effective_cs), phy_reg1_val)); 1904 CHECK_STATUS(ddr3_tip_bus_write 1905 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1906 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1907 PBS_TX_BCAST_PHY_REG(effective_cs), 0x0)); 1908 CHECK_STATUS(ddr3_tip_bus_write 1909 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1910 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1911 PBS_RX_BCAST_PHY_REG(effective_cs), 0)); 1912 CHECK_STATUS(ddr3_tip_bus_write 1913 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1914 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1915 PBS_TX_PHY_REG(effective_cs, DQSP_PAD), 0)); 1916 CHECK_STATUS(ddr3_tip_bus_write 1917 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1918 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1919 PBS_RX_PHY_REG(effective_cs, DQSP_PAD), 0)); 1920 CHECK_STATUS(ddr3_tip_bus_write 1921 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1922 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1923 PBS_TX_PHY_REG(effective_cs, DQSN_PAD), 0)); 1924 CHECK_STATUS(ddr3_tip_bus_write 1925 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1926 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1927 PBS_RX_PHY_REG(effective_cs, DQSN_PAD), 0)); 1928 } 1929 } 1930 1931 /* Set Receiver Calibration value */ 1932 for (cs = 0; cs < MAX_CS_NUM; cs++) { 1933 /* PHY register 0xdb bits[5:0] - configure to 63 */ 1934 CHECK_STATUS(ddr3_tip_bus_write 1935 (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1936 ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1937 DDR_PHY_DATA, VREF_BCAST_PHY_REG(cs), 63)); 1938 } 1939 1940 return MV_OK; 1941 } 1942 1943 /* 1944 * Restore Dunit registers 1945 */ 1946 int ddr3_tip_restore_dunit_regs(u32 dev_num) 1947 { 1948 u32 index_cnt; 1949 1950 mv_ddr_set_calib_controller(); 1951 1952 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1953 PARAM_NOT_CARE, MAIN_PADS_CAL_MACH_CTRL_REG, 1954 0x1, 0x1)); 1955 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1956 PARAM_NOT_CARE, MAIN_PADS_CAL_MACH_CTRL_REG, 1957 calibration_update_control << 3, 1958 0x3 << 3)); 1959 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 1960 PARAM_NOT_CARE, 1961 ODPG_WR_RD_MODE_ENA_REG, 1962 0xffff, MASK_ALL_BITS)); 1963 1964 for (index_cnt = 0; index_cnt < ARRAY_SIZE(odpg_default_value); 1965 index_cnt++) { 1966 CHECK_STATUS(ddr3_tip_if_write 1967 (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 1968 odpg_default_value[index_cnt].reg_addr, 1969 odpg_default_value[index_cnt].reg_data, 1970 odpg_default_value[index_cnt].reg_mask)); 1971 } 1972 1973 return MV_OK; 1974 } 1975 1976 int ddr3_tip_adll_regs_bypass(u32 dev_num, u32 reg_val1, u32 reg_val2) 1977 { 1978 u32 if_id, phy_id; 1979 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 1980 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 1981 1982 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 1983 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 1984 for (phy_id = 0; phy_id < octets_per_if_num; phy_id++) { 1985 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id); 1986 
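/*
 * Override this PHY's TX centralization (CTX) register and PBS TX broadcast
 * register for the current effective_cs. The main flow calls this helper with
 * (0, 0x1f) to bypass the ADLL before dropping to the low frequency, and with
 * (phy_reg1_val, 0) to restore the defaults once the patterns have been loaded.
 */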
CHECK_STATUS(ddr3_tip_bus_write 1987 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1988 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1989 CTX_PHY_REG(effective_cs), reg_val1)); 1990 CHECK_STATUS(ddr3_tip_bus_write 1991 (dev_num, ACCESS_TYPE_UNICAST, if_id, 1992 ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, 1993 PBS_TX_BCAST_PHY_REG(effective_cs), reg_val2)); 1994 } 1995 } 1996 1997 return MV_OK; 1998 } 1999 2000 /* 2001 * Auto tune main flow 2002 */ 2003 static int ddr3_tip_ddr3_training_main_flow(u32 dev_num) 2004 { 2005 /* TODO: enable this functionality for other platforms */ 2006 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X) 2007 struct init_cntr_param init_cntr_prm; 2008 #endif 2009 int ret = MV_OK; 2010 int adll_bypass_flag = 0; 2011 u32 if_id; 2012 unsigned int max_cs = mv_ddr_cs_num_get(); 2013 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2014 enum mv_ddr_freq freq = tm->interface_params[0].memory_freq; 2015 unsigned int *freq_tbl = mv_ddr_freq_tbl_get(); 2016 2017 #ifdef DDR_VIEWER_TOOL 2018 if (debug_training == DEBUG_LEVEL_TRACE) { 2019 CHECK_STATUS(print_device_info((u8)dev_num)); 2020 } 2021 #endif 2022 2023 ddr3_tip_validate_algo_components(dev_num); 2024 2025 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2026 CHECK_STATUS(ddr3_tip_ddr3_reset_phy_regs(dev_num)); 2027 } 2028 /* Set to 0 after each loop to avoid illegal value may be used */ 2029 effective_cs = 0; 2030 2031 freq_tbl[MV_DDR_FREQ_LOW_FREQ] = dfs_low_freq; 2032 2033 if (is_pll_before_init != 0) { 2034 for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { 2035 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 2036 config_func_info[dev_num].tip_set_freq_divider_func( 2037 (u8)dev_num, if_id, freq); 2038 } 2039 } 2040 2041 /* TODO: enable this functionality for other platforms */ 2042 #if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X) 2043 if (is_adll_calib_before_init != 0) { 2044 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2045 ("with adll calib before init\n")); 2046 adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq); 2047 } 2048 2049 if (is_reg_dump != 0) { 2050 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2051 ("Dump before init controller\n")); 2052 ddr3_tip_reg_dump(dev_num); 2053 } 2054 2055 if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) { 2056 training_stage = INIT_CONTROLLER; 2057 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2058 ("INIT_CONTROLLER_MASK_BIT\n")); 2059 init_cntr_prm.do_mrs_phy = 1; 2060 init_cntr_prm.is_ctrl64_bit = 0; 2061 init_cntr_prm.init_phy = 1; 2062 init_cntr_prm.msys_init = 0; 2063 ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm); 2064 if (is_reg_dump != 0) 2065 ddr3_tip_reg_dump(dev_num); 2066 if (ret != MV_OK) { 2067 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2068 ("hws_ddr3_tip_init_controller failure\n")); 2069 if (debug_mode == 0) 2070 return MV_FAIL; 2071 } 2072 } 2073 #endif 2074 2075 ret = adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq); 2076 if (ret != MV_OK) { 2077 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2078 ("adll_calibration failure\n")); 2079 if (debug_mode == 0) 2080 return MV_FAIL; 2081 } 2082 2083 if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) { 2084 training_stage = SET_LOW_FREQ; 2085 2086 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2087 ddr3_tip_adll_regs_bypass(dev_num, 0, 0x1f); 2088 adll_bypass_flag = 1; 2089 } 2090 effective_cs = 0; 2091 2092 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2093 ("SET_LOW_FREQ_MASK_BIT %d\n", 2094 freq_tbl[low_freq])); 2095 ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2096 
PARAM_NOT_CARE, low_freq); 2097 if (is_reg_dump != 0) 2098 ddr3_tip_reg_dump(dev_num); 2099 if (ret != MV_OK) { 2100 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2101 ("ddr3_tip_freq_set failure\n")); 2102 if (debug_mode == 0) 2103 return MV_FAIL; 2104 } 2105 } 2106 2107 if (mask_tune_func & WRITE_LEVELING_LF_MASK_BIT) { 2108 training_stage = WRITE_LEVELING_LF; 2109 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2110 ("WRITE_LEVELING_LF_MASK_BIT\n")); 2111 ret = ddr3_tip_dynamic_write_leveling(dev_num, 1); 2112 if (is_reg_dump != 0) 2113 ddr3_tip_reg_dump(dev_num); 2114 if (ret != MV_OK) { 2115 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2116 ("ddr3_tip_dynamic_write_leveling LF failure\n")); 2117 if (debug_mode == 0) 2118 return MV_FAIL; 2119 } 2120 } 2121 2122 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2123 if (mask_tune_func & LOAD_PATTERN_MASK_BIT) { 2124 training_stage = LOAD_PATTERN; 2125 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2126 ("LOAD_PATTERN_MASK_BIT #%d\n", 2127 effective_cs)); 2128 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2129 if (is_reg_dump != 0) 2130 ddr3_tip_reg_dump(dev_num); 2131 if (ret != MV_OK) { 2132 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2133 ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", 2134 effective_cs)); 2135 if (debug_mode == 0) 2136 return MV_FAIL; 2137 } 2138 } 2139 } 2140 2141 if (adll_bypass_flag == 1) { 2142 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2143 ddr3_tip_adll_regs_bypass(dev_num, phy_reg1_val, 0); 2144 adll_bypass_flag = 0; 2145 } 2146 } 2147 2148 /* Set to 0 after each loop to avoid illegal value may be used */ 2149 effective_cs = 0; 2150 2151 if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) { 2152 training_stage = SET_MEDIUM_FREQ; 2153 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2154 ("SET_MEDIUM_FREQ_MASK_BIT %d\n", 2155 freq_tbl[medium_freq])); 2156 ret = 2157 ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2158 PARAM_NOT_CARE, medium_freq); 2159 if (is_reg_dump != 0) 2160 ddr3_tip_reg_dump(dev_num); 2161 if (ret != MV_OK) { 2162 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2163 ("ddr3_tip_freq_set failure\n")); 2164 if (debug_mode == 0) 2165 return MV_FAIL; 2166 } 2167 } 2168 2169 if (mask_tune_func & WRITE_LEVELING_MASK_BIT) { 2170 training_stage = WRITE_LEVELING; 2171 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2172 ("WRITE_LEVELING_MASK_BIT\n")); 2173 if ((rl_mid_freq_wa == 0) || (freq_tbl[medium_freq] == 533)) { 2174 ret = ddr3_tip_dynamic_write_leveling(dev_num, 0); 2175 } else { 2176 /* Use old WL */ 2177 ret = ddr3_tip_legacy_dynamic_write_leveling(dev_num); 2178 } 2179 2180 if (is_reg_dump != 0) 2181 ddr3_tip_reg_dump(dev_num); 2182 if (ret != MV_OK) { 2183 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2184 ("ddr3_tip_dynamic_write_leveling failure\n")); 2185 if (debug_mode == 0) 2186 return MV_FAIL; 2187 } 2188 } 2189 2190 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2191 if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) { 2192 training_stage = LOAD_PATTERN_2; 2193 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2194 ("LOAD_PATTERN_2_MASK_BIT CS #%d\n", 2195 effective_cs)); 2196 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2197 if (is_reg_dump != 0) 2198 ddr3_tip_reg_dump(dev_num); 2199 if (ret != MV_OK) { 2200 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2201 ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", 2202 effective_cs)); 2203 if (debug_mode == 0) 2204 return MV_FAIL; 2205 } 2206 } 2207 } 2208 /* Set to 0 after each loop to avoid illegal value may be used */ 2209 effective_cs = 0; 2210 2211 if (mask_tune_func & 
READ_LEVELING_MASK_BIT) { 2212 training_stage = READ_LEVELING; 2213 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2214 ("READ_LEVELING_MASK_BIT\n")); 2215 if ((rl_mid_freq_wa == 0) || (freq_tbl[medium_freq] == 533)) { 2216 ret = ddr3_tip_dynamic_read_leveling(dev_num, medium_freq); 2217 } else { 2218 /* Use old RL */ 2219 ret = ddr3_tip_legacy_dynamic_read_leveling(dev_num); 2220 } 2221 2222 if (is_reg_dump != 0) 2223 ddr3_tip_reg_dump(dev_num); 2224 if (ret != MV_OK) { 2225 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2226 ("ddr3_tip_dynamic_read_leveling failure\n")); 2227 if (debug_mode == 0) 2228 return MV_FAIL; 2229 } 2230 } 2231 2232 if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) { 2233 training_stage = WRITE_LEVELING_SUPP; 2234 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2235 ("WRITE_LEVELING_SUPP_MASK_BIT\n")); 2236 ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); 2237 if (is_reg_dump != 0) 2238 ddr3_tip_reg_dump(dev_num); 2239 if (ret != MV_OK) { 2240 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2241 ("ddr3_tip_dynamic_write_leveling_supp failure\n")); 2242 if (debug_mode == 0) 2243 return MV_FAIL; 2244 } 2245 } 2246 2247 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2248 if (mask_tune_func & PBS_RX_MASK_BIT) { 2249 training_stage = PBS_RX; 2250 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2251 ("PBS_RX_MASK_BIT CS #%d\n", 2252 effective_cs)); 2253 ret = ddr3_tip_pbs_rx(dev_num); 2254 if (is_reg_dump != 0) 2255 ddr3_tip_reg_dump(dev_num); 2256 if (ret != MV_OK) { 2257 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2258 ("ddr3_tip_pbs_rx failure CS #%d\n", 2259 effective_cs)); 2260 if (debug_mode == 0) 2261 return MV_FAIL; 2262 } 2263 } 2264 } 2265 2266 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2267 if (mask_tune_func & PBS_TX_MASK_BIT) { 2268 training_stage = PBS_TX; 2269 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2270 ("PBS_TX_MASK_BIT CS #%d\n", 2271 effective_cs)); 2272 ret = ddr3_tip_pbs_tx(dev_num); 2273 if (is_reg_dump != 0) 2274 ddr3_tip_reg_dump(dev_num); 2275 if (ret != MV_OK) { 2276 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2277 ("ddr3_tip_pbs_tx failure CS #%d\n", 2278 effective_cs)); 2279 if (debug_mode == 0) 2280 return MV_FAIL; 2281 } 2282 } 2283 } 2284 /* Set to 0 after each loop to avoid illegal value may be used */ 2285 effective_cs = 0; 2286 2287 if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) { 2288 training_stage = SET_TARGET_FREQ; 2289 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2290 ("SET_TARGET_FREQ_MASK_BIT %d\n", 2291 freq_tbl[tm-> 2292 interface_params[first_active_if]. 2293 memory_freq])); 2294 ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, 2295 PARAM_NOT_CARE, 2296 tm->interface_params[first_active_if]. 
2297 memory_freq); 2298 if (is_reg_dump != 0) 2299 ddr3_tip_reg_dump(dev_num); 2300 if (ret != MV_OK) { 2301 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2302 ("ddr3_tip_freq_set failure\n")); 2303 if (debug_mode == 0) 2304 return MV_FAIL; 2305 } 2306 } 2307 2308 if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) { 2309 training_stage = WRITE_LEVELING_TF; 2310 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2311 ("WRITE_LEVELING_TF_MASK_BIT\n")); 2312 ret = ddr3_tip_dynamic_write_leveling(dev_num, 0); 2313 if (is_reg_dump != 0) 2314 ddr3_tip_reg_dump(dev_num); 2315 if (ret != MV_OK) { 2316 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2317 ("ddr3_tip_dynamic_write_leveling TF failure\n")); 2318 if (debug_mode == 0) 2319 return MV_FAIL; 2320 } 2321 } 2322 2323 if (mask_tune_func & LOAD_PATTERN_HIGH_MASK_BIT) { 2324 training_stage = LOAD_PATTERN_HIGH; 2325 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("LOAD_PATTERN_HIGH\n")); 2326 ret = ddr3_tip_load_all_pattern_to_mem(dev_num); 2327 if (is_reg_dump != 0) 2328 ddr3_tip_reg_dump(dev_num); 2329 if (ret != MV_OK) { 2330 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2331 ("ddr3_tip_load_all_pattern_to_mem failure\n")); 2332 if (debug_mode == 0) 2333 return MV_FAIL; 2334 } 2335 } 2336 2337 if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) { 2338 training_stage = READ_LEVELING_TF; 2339 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2340 ("READ_LEVELING_TF_MASK_BIT\n")); 2341 ret = ddr3_tip_dynamic_read_leveling(dev_num, tm-> 2342 interface_params[first_active_if]. 2343 memory_freq); 2344 if (is_reg_dump != 0) 2345 ddr3_tip_reg_dump(dev_num); 2346 if (ret != MV_OK) { 2347 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2348 ("ddr3_tip_dynamic_read_leveling TF failure\n")); 2349 if (debug_mode == 0) 2350 return MV_FAIL; 2351 } 2352 } 2353 2354 if (mask_tune_func & RL_DQS_BURST_MASK_BIT) { 2355 training_stage = READ_LEVELING_TF; 2356 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2357 ("RL_DQS_BURST_MASK_BIT\n")); 2358 ret = mv_ddr_rl_dqs_burst(0, 0, tm->interface_params[0].memory_freq); 2359 if (is_reg_dump != 0) 2360 ddr3_tip_reg_dump(dev_num); 2361 if (ret != MV_OK) { 2362 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2363 ("mv_ddr_rl_dqs_burst TF failure\n")); 2364 if (debug_mode == 0) 2365 return MV_FAIL; 2366 } 2367 } 2368 2369 if (mask_tune_func & DM_PBS_TX_MASK_BIT) { 2370 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n")); 2371 } 2372 2373 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2374 if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) { 2375 training_stage = VREF_CALIBRATION; 2376 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("VREF\n")); 2377 ret = ddr3_tip_vref(dev_num); 2378 if (is_reg_dump != 0) { 2379 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2380 ("VREF Dump\n")); 2381 ddr3_tip_reg_dump(dev_num); 2382 } 2383 if (ret != MV_OK) { 2384 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2385 ("ddr3_tip_vref failure\n")); 2386 if (debug_mode == 0) 2387 return MV_FAIL; 2388 } 2389 } 2390 } 2391 /* Set to 0 after each loop to avoid illegal value may be used */ 2392 effective_cs = 0; 2393 2394 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2395 if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) { 2396 training_stage = CENTRALIZATION_RX; 2397 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2398 ("CENTRALIZATION_RX_MASK_BIT CS #%d\n", 2399 effective_cs)); 2400 ret = ddr3_tip_centralization_rx(dev_num); 2401 if (is_reg_dump != 0) 2402 ddr3_tip_reg_dump(dev_num); 2403 if (ret != MV_OK) { 2404 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2405 ("ddr3_tip_centralization_rx failure CS #%d\n", 2406 effective_cs)); 2407 if (debug_mode == 0) 2408 
return MV_FAIL; 2409 } 2410 } 2411 } 2412 /* Set to 0 after each loop to avoid illegal value may be used */ 2413 effective_cs = 0; 2414 2415 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2416 if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) { 2417 training_stage = WRITE_LEVELING_SUPP_TF; 2418 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2419 ("WRITE_LEVELING_SUPP_TF_MASK_BIT CS #%d\n", 2420 effective_cs)); 2421 ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); 2422 if (is_reg_dump != 0) 2423 ddr3_tip_reg_dump(dev_num); 2424 if (ret != MV_OK) { 2425 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2426 ("ddr3_tip_dynamic_write_leveling_supp TF failure CS #%d\n", 2427 effective_cs)); 2428 if (debug_mode == 0) 2429 return MV_FAIL; 2430 } 2431 } 2432 } 2433 /* Set to 0 after each loop to avoid illegal value may be used */ 2434 effective_cs = 0; 2435 2436 2437 for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { 2438 if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) { 2439 training_stage = CENTRALIZATION_TX; 2440 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2441 ("CENTRALIZATION_TX_MASK_BIT CS #%d\n", 2442 effective_cs)); 2443 ret = ddr3_tip_centralization_tx(dev_num); 2444 if (is_reg_dump != 0) 2445 ddr3_tip_reg_dump(dev_num); 2446 if (ret != MV_OK) { 2447 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2448 ("ddr3_tip_centralization_tx failure CS #%d\n", 2449 effective_cs)); 2450 if (debug_mode == 0) 2451 return MV_FAIL; 2452 } 2453 } 2454 } 2455 /* Set to 0 after each loop to avoid illegal value may be used */ 2456 effective_cs = 0; 2457 2458 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n")); 2459 /* restore register values */ 2460 CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num)); 2461 2462 if (is_reg_dump != 0) 2463 ddr3_tip_reg_dump(dev_num); 2464 2465 return MV_OK; 2466 } 2467 2468 /* 2469 * DDR3 Dynamic training flow 2470 */ 2471 static int ddr3_tip_ddr3_auto_tune(u32 dev_num) 2472 { 2473 int status; 2474 u32 if_id, stage; 2475 int is_if_fail = 0, is_auto_tune_fail = 0; 2476 2477 training_stage = INIT_CONTROLLER; 2478 2479 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2480 for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) 2481 training_result[stage][if_id] = NO_TEST_DONE; 2482 } 2483 2484 status = ddr3_tip_ddr3_training_main_flow(dev_num); 2485 2486 /* activate XSB test */ 2487 if (xsb_validate_type != 0) { 2488 run_xsb_test(dev_num, xsb_validation_base_address, 1, 1, 2489 0x1024); 2490 } 2491 2492 if (is_reg_dump != 0) 2493 ddr3_tip_reg_dump(dev_num); 2494 2495 /* print log */ 2496 CHECK_STATUS(ddr3_tip_print_log(dev_num, window_mem_addr)); 2497 2498 #ifndef EXCLUDE_DEBUG_PRINTS 2499 if (status != MV_OK) { 2500 CHECK_STATUS(ddr3_tip_print_stability_log(dev_num)); 2501 } 2502 #endif /* EXCLUDE_DEBUG_PRINTS */ 2503 2504 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2505 is_if_fail = 0; 2506 for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) { 2507 if (training_result[stage][if_id] == TEST_FAILED) 2508 is_if_fail = 1; 2509 } 2510 if (is_if_fail == 1) { 2511 is_auto_tune_fail = 1; 2512 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2513 ("Auto Tune failed for IF %d\n", 2514 if_id)); 2515 } 2516 } 2517 2518 if (((status == MV_FAIL) && (is_auto_tune_fail == 0)) || 2519 ((status == MV_OK) && (is_auto_tune_fail == 1))) { 2520 /* 2521 * If MainFlow result and trainingResult DB not in sync, 2522 * issue warning (caused by no update of trainingResult DB 2523 * when failed) 2524 */ 2525 DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, 2526 ("Warning: Algorithm return value and 
Result DB" 2527 "are not synced (status 0x%x result DB %d)\n", 2528 status, is_auto_tune_fail)); 2529 } 2530 2531 if ((status != MV_OK) || (is_auto_tune_fail == 1)) 2532 return MV_FAIL; 2533 else 2534 return MV_OK; 2535 } 2536 2537 /* 2538 * Enable init sequence 2539 */ 2540 int ddr3_tip_enable_init_sequence(u32 dev_num) 2541 { 2542 int is_fail = 0; 2543 u32 if_id = 0, mem_mask = 0, bus_index = 0; 2544 u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); 2545 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2546 2547 /* Enable init sequence */ 2548 CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 0, 2549 SDRAM_INIT_CTRL_REG, 0x1, 0x1)); 2550 2551 for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { 2552 VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); 2553 2554 if (ddr3_tip_if_polling 2555 (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1, 2556 SDRAM_INIT_CTRL_REG, 2557 MAX_POLLING_ITERATIONS) != MV_OK) { 2558 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2559 ("polling failed IF %d\n", 2560 if_id)); 2561 is_fail = 1; 2562 continue; 2563 } 2564 2565 mem_mask = 0; 2566 for (bus_index = 0; bus_index < octets_per_if_num; 2567 bus_index++) { 2568 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_index); 2569 mem_mask |= 2570 tm->interface_params[if_id]. 2571 as_bus_params[bus_index].mirror_enable_bitmask; 2572 } 2573 2574 if (mem_mask != 0) { 2575 /* Disable Multi CS */ 2576 CHECK_STATUS(ddr3_tip_if_write 2577 (dev_num, ACCESS_TYPE_MULTICAST, 2578 if_id, DUAL_DUNIT_CFG_REG, 1 << 3, 2579 1 << 3)); 2580 } 2581 } 2582 2583 return (is_fail == 0) ? MV_OK : MV_FAIL; 2584 } 2585 2586 int ddr3_tip_register_dq_table(u32 dev_num, u32 *table) 2587 { 2588 dq_map_table = table; 2589 2590 return MV_OK; 2591 } 2592 2593 /* 2594 * Check if pup search is locked 2595 */ 2596 int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode) 2597 { 2598 u32 bit_start = 0, bit_end = 0, bit_id; 2599 2600 if (read_mode == RESULT_PER_BIT) { 2601 bit_start = 0; 2602 bit_end = BUS_WIDTH_IN_BITS - 1; 2603 } else { 2604 bit_start = 0; 2605 bit_end = 0; 2606 } 2607 2608 for (bit_id = bit_start; bit_id <= bit_end; bit_id++) { 2609 if (GET_LOCK_RESULT(pup_buf[bit_id]) == 0) 2610 return 0; 2611 } 2612 2613 return 1; 2614 } 2615 2616 /* 2617 * Get minimum buffer value 2618 */ 2619 u8 ddr3_tip_get_buf_min(u8 *buf_ptr) 2620 { 2621 u8 min_val = 0xff; 2622 u8 cnt = 0; 2623 2624 for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { 2625 if (buf_ptr[cnt] < min_val) 2626 min_val = buf_ptr[cnt]; 2627 } 2628 2629 return min_val; 2630 } 2631 2632 /* 2633 * Get maximum buffer value 2634 */ 2635 u8 ddr3_tip_get_buf_max(u8 *buf_ptr) 2636 { 2637 u8 max_val = 0; 2638 u8 cnt = 0; 2639 2640 for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { 2641 if (buf_ptr[cnt] > max_val) 2642 max_val = buf_ptr[cnt]; 2643 } 2644 2645 return max_val; 2646 } 2647 2648 /* 2649 * The following functions return memory parameters: 2650 * bus and device width, device size 2651 */ 2652 2653 u32 hws_ddr3_get_bus_width(void) 2654 { 2655 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2656 2657 return (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 2658 1) ? 16 : 32; 2659 } 2660 2661 u32 hws_ddr3_get_device_width(u32 if_id) 2662 { 2663 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2664 2665 return (tm->interface_params[if_id].bus_width == 2666 MV_DDR_DEV_WIDTH_8BIT) ? 
8 : 16; 2667 } 2668 2669 u32 hws_ddr3_get_device_size(u32 if_id) 2670 { 2671 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2672 2673 if (tm->interface_params[if_id].memory_size >= 2674 MV_DDR_DIE_CAP_LAST) { 2675 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2676 ("Error: Wrong device size of Cs: %d", 2677 tm->interface_params[if_id].memory_size)); 2678 return 0; 2679 } else { 2680 return 1 << tm->interface_params[if_id].memory_size; 2681 } 2682 } 2683 2684 int hws_ddr3_calc_mem_cs_size(u32 if_id, u32 cs, u32 *cs_size) 2685 { 2686 u32 cs_mem_size, dev_size; 2687 2688 dev_size = hws_ddr3_get_device_size(if_id); 2689 if (dev_size != 0) { 2690 cs_mem_size = ((hws_ddr3_get_bus_width() / 2691 hws_ddr3_get_device_width(if_id)) * dev_size); 2692 2693 /* the calculated result in Gbytex16 to avoid float using */ 2694 2695 if (cs_mem_size == 2) { 2696 *cs_size = _128M; 2697 } else if (cs_mem_size == 4) { 2698 *cs_size = _256M; 2699 } else if (cs_mem_size == 8) { 2700 *cs_size = _512M; 2701 } else if (cs_mem_size == 16) { 2702 *cs_size = _1G; 2703 } else if (cs_mem_size == 32) { 2704 *cs_size = _2G; 2705 } else { 2706 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2707 ("Error: Wrong Memory size of Cs: %d", cs)); 2708 return MV_FAIL; 2709 } 2710 return MV_OK; 2711 } else { 2712 return MV_FAIL; 2713 } 2714 } 2715 2716 int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr) 2717 { 2718 u32 cs_mem_size = 0; 2719 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE 2720 u32 physical_mem_size; 2721 u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE; 2722 #endif 2723 2724 if (hws_ddr3_calc_mem_cs_size(if_id, cs, &cs_mem_size) != MV_OK) 2725 return MV_FAIL; 2726 2727 #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE 2728 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2729 /* 2730 * if number of address pins doesn't allow to use max mem size that 2731 * is defined in topology mem size is defined by 2732 * DEVICE_MAX_DRAM_ADDRESS_SIZE 2733 */ 2734 physical_mem_size = mem_size[tm->interface_params[0].memory_size]; 2735 2736 if (hws_ddr3_get_device_width(cs) == 16) { 2737 /* 2738 * 16bit mem device can be twice more - no need in less 2739 * significant pin 2740 */ 2741 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2; 2742 } 2743 2744 if (physical_mem_size > max_mem_size) { 2745 cs_mem_size = max_mem_size * 2746 (hws_ddr3_get_bus_width() / 2747 hws_ddr3_get_device_width(if_id)); 2748 DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, 2749 ("Updated Physical Mem size is from 0x%x to %x\n", 2750 physical_mem_size, 2751 DEVICE_MAX_DRAM_ADDRESS_SIZE)); 2752 } 2753 #endif 2754 2755 /* calculate CS base addr */ 2756 *cs_base_addr = ((cs_mem_size) * cs) & 0xffff0000; 2757 2758 return MV_OK; 2759 } 2760 2761 /* TODO: consider to move to misl phy driver */ 2762 enum { 2763 MISL_PHY_DRV_OHM_30 = 0xf, 2764 MISL_PHY_DRV_OHM_48 = 0xa, 2765 MISL_PHY_DRV_OHM_80 = 0x6, 2766 MISL_PHY_DRV_OHM_120 = 0x4 2767 }; 2768 2769 enum { 2770 MISL_PHY_ODT_OHM_60 = 0x8, 2771 MISL_PHY_ODT_OHM_80 = 0x6, 2772 MISL_PHY_ODT_OHM_120 = 0x4, 2773 MISL_PHY_ODT_OHM_240 = 0x2 2774 }; 2775 2776 static unsigned int mv_ddr_misl_phy_drv_calc(unsigned int cfg) 2777 { 2778 unsigned int val; 2779 2780 switch (cfg) { 2781 case MV_DDR_OHM_30: 2782 val = MISL_PHY_DRV_OHM_30; 2783 break; 2784 case MV_DDR_OHM_48: 2785 val = MISL_PHY_DRV_OHM_48; 2786 break; 2787 case MV_DDR_OHM_80: 2788 val = MISL_PHY_DRV_OHM_80; 2789 break; 2790 case MV_DDR_OHM_120: 2791 val = MISL_PHY_DRV_OHM_120; 2792 break; 2793 default: 2794 val = PARAM_UNDEFINED; 2795 } 2796 2797 return val; 2798 } 2799 2800 static 
unsigned int mv_ddr_misl_phy_odt_calc(unsigned int cfg) 2801 { 2802 unsigned int val; 2803 2804 switch (cfg) { 2805 case MV_DDR_OHM_60: 2806 val = MISL_PHY_ODT_OHM_60; 2807 break; 2808 case MV_DDR_OHM_80: 2809 val = MISL_PHY_ODT_OHM_80; 2810 break; 2811 case MV_DDR_OHM_120: 2812 val = MISL_PHY_ODT_OHM_120; 2813 break; 2814 case MV_DDR_OHM_240: 2815 val = MISL_PHY_ODT_OHM_240; 2816 break; 2817 default: 2818 val = PARAM_UNDEFINED; 2819 } 2820 2821 return val; 2822 } 2823 2824 unsigned int mv_ddr_misl_phy_drv_data_p_get(void) 2825 { 2826 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2827 unsigned int drv_data_p = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_data_p); 2828 2829 if (drv_data_p == PARAM_UNDEFINED) 2830 printf("error: %s: unsupported drv_data_p parameter found\n", __func__); 2831 2832 return drv_data_p; 2833 } 2834 2835 unsigned int mv_ddr_misl_phy_drv_data_n_get(void) 2836 { 2837 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2838 unsigned int drv_data_n = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_data_n); 2839 2840 if (drv_data_n == PARAM_UNDEFINED) 2841 printf("error: %s: unsupported drv_data_n parameter found\n", __func__); 2842 2843 return drv_data_n; 2844 } 2845 2846 unsigned int mv_ddr_misl_phy_drv_ctrl_p_get(void) 2847 { 2848 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2849 unsigned int drv_ctrl_p = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_ctrl_p); 2850 2851 if (drv_ctrl_p == PARAM_UNDEFINED) 2852 printf("error: %s: unsupported drv_ctrl_p parameter found\n", __func__); 2853 2854 return drv_ctrl_p; 2855 } 2856 2857 unsigned int mv_ddr_misl_phy_drv_ctrl_n_get(void) 2858 { 2859 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2860 unsigned int drv_ctrl_n = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_ctrl_n); 2861 2862 if (drv_ctrl_n == PARAM_UNDEFINED) 2863 printf("error: %s: unsupported drv_ctrl_n parameter found\n", __func__); 2864 2865 return drv_ctrl_n; 2866 } 2867 2868 unsigned int mv_ddr_misl_phy_odt_p_get(void) 2869 { 2870 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2871 unsigned int cs_num = mv_ddr_cs_num_get(); 2872 unsigned int odt_p = PARAM_UNDEFINED; 2873 2874 if (cs_num > 0 && cs_num <= MAX_CS_NUM) 2875 odt_p = mv_ddr_misl_phy_odt_calc(tm->edata.phy_edata.odt_p[cs_num - 1]); 2876 2877 if (odt_p == PARAM_UNDEFINED) 2878 printf("error: %s: unsupported odt_p parameter found\n", __func__); 2879 2880 return odt_p; 2881 } 2882 2883 unsigned int mv_ddr_misl_phy_odt_n_get(void) 2884 { 2885 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); 2886 unsigned int cs_num = mv_ddr_cs_num_get(); 2887 unsigned int odt_n = PARAM_UNDEFINED; 2888 2889 if (cs_num > 0 && cs_num <= MAX_CS_NUM) 2890 odt_n = mv_ddr_misl_phy_odt_calc(tm->edata.phy_edata.odt_n[cs_num - 1]); 2891 2892 if (odt_n == PARAM_UNDEFINED) 2893 printf("error: %s: unsupported odt_n parameter found\n", __func__); 2894 2895 return odt_n; 2896 } 2897
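/*
 * Illustrative sketch only (compiled out): shows how the MISL PHY drive-strength
 * getters above could be folded into a single per-pad register value. The nibble
 * placement (N in bits [7:4], P in bits [3:0]) and the use of PAD_ZRI_CAL_PHY_REG
 * as the target register are assumptions for illustration; the real programming
 * of these values is done by the init/tune code, not by this file.
 */
#if 0
static int mv_ddr_misl_phy_drv_data_set_example(u32 dev_num)
{
	unsigned int drv_p = mv_ddr_misl_phy_drv_data_p_get(); /* e.g. MV_DDR_OHM_48 -> 0xa */
	unsigned int drv_n = mv_ddr_misl_phy_drv_data_n_get(); /* e.g. MV_DDR_OHM_48 -> 0xa */
	u32 val;

	if (drv_p == PARAM_UNDEFINED || drv_n == PARAM_UNDEFINED)
		return MV_FAIL;

	/* assumed layout: N drive-strength nibble above the P nibble */
	val = (drv_n << 4) | drv_p;

	/* broadcast the value to all data PHYs on all active interfaces */
	return ddr3_tip_bus_write(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
				  ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
				  DDR_PHY_DATA, PAD_ZRI_CAL_PHY_REG, val);
}
#endif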