// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"

static u32 bist_offset = 32;
enum hws_pattern sweep_pattern = PATTERN_KILLER_DQ0;

static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id,
				   enum hws_bist_operation oper_type);

/*
 * BIST activate
 */
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
			   enum hws_access_type access_type, u32 if_num,
			   enum hws_dir dir,
			   enum hws_stress_jump addr_stress_jump,
			   enum hws_pattern_duration duration,
			   enum hws_bist_operation oper_type,
			   u32 offset, u32 cs_num, u32 pattern_addr_length)
{
	u32 tx_burst_size;
	u32 delay_between_burst;
	u32 rd_mode;
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ?
			  (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
			  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

	ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_length, MASK_ALL_BITS);
	tx_burst_size = (dir == OPER_WRITE) ?
		pattern_table[pattern].tx_burst_size : 0;
	delay_between_burst = (dir == OPER_WRITE) ? 2 : 0;
	rd_mode = (dir == OPER_WRITE) ? 1 : 0;
	ddr3_tip_configure_odpg(0, access_type, 0, dir,
				pattern_table[pattern].num_of_phases_tx, tx_burst_size,
				pattern_table[pattern].num_of_phases_rx,
				delay_between_burst,
				rd_mode, cs_num, addr_stress_jump, duration);
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_OFFS_REG, offset, MASK_ALL_BITS);

	if (oper_type == BIST_STOP) {
		ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
	} else {
		ddr3_tip_bist_operation(0, access_type, 0, BIST_START);
		if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
			return MV_FAIL;
		ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
	}
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);

	return MV_OK;
}

/*
 * BIST read result
 */
int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
			      struct bist_result *pst_bist_result)
{
	int ret;
	u32 read_data[MAX_INTERFACE_NUM];
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (IS_IF_ACTIVE(tm->if_act_mask, if_id) == 0)
		return MV_NOT_SUPPORTED;
	DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
				   ("ddr3_tip_bist_read_result if_id %d\n",
				    if_id));
	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_high = read_data[if_id];
	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_low = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_ADDR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_last_fail_addr = read_data[if_id];
	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_CNTR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_error_cnt = read_data[if_id];

	return MV_OK;
}
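
/*
 * Typical use of the two routines above (a sketch, not part of the driver):
 * run a write BIST to fill the area behind the CS window, run a read BIST
 * over the same area, then fetch the per-interface error counters.
 * hws_ddr3_run_bist() below wires exactly these steps together; assuming a
 * hypothetical single-interface board with dev_num 0 and cs 0:
 *
 *	u32 err_cnt[MAX_INTERFACE_NUM];
 *
 *	if (hws_ddr3_run_bist(0, PATTERN_KILLER_DQ0, err_cnt, 0) == MV_OK)
 *		printf("if 0: %u bist errors\n", err_cnt[0]);
 */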

/*
 * BIST flow - Activate & read result
 */
int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
		      u32 cs_num)
{
	int ret;
	u32 i = 0;
	u32 win_base;
	struct bist_result st_bist_result;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
		hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_WRITE, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_READ, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_read_result failed\n");
			return ret;
		}
		result[i] = st_bist_result.bist_error_cnt;
	}

	return MV_OK;
}

/*
 * Set BIST Operation
 */
static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id, enum hws_bist_operation oper_type)
{
	if (oper_type == BIST_STOP)
		mv_ddr_odpg_disable();
	else
		mv_ddr_odpg_enable();

	return MV_OK;
}

/*
 * Print BIST result
 */
void ddr3_tip_print_bist_res(void)
{
	u32 dev_num = 0;
	u32 i;
	struct bist_result st_bist_result[MAX_INTERFACE_NUM];
	int res;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);

		res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
		if (res != MV_OK) {
			DEBUG_TRAINING_BIST_ENGINE(
				DEBUG_LEVEL_ERROR,
				("ddr3_tip_bist_read_result failed\n"));
			return;
		}
	}

	DEBUG_TRAINING_BIST_ENGINE(
		DEBUG_LEVEL_INFO,
		("interface | error_cnt | fail_low | fail_high | fail_addr\n"));

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);

		DEBUG_TRAINING_BIST_ENGINE(
			DEBUG_LEVEL_INFO,
			("%d | 0x%08x | 0x%08x | 0x%08x | 0x%08x\n",
			 i, st_bist_result[i].bist_error_cnt,
			 st_bist_result[i].bist_fail_low,
			 st_bist_result[i].bist_fail_high,
			 st_bist_result[i].bist_last_fail_addr));
	}
}

enum {
	PASS,
	FAIL
};

#define TIP_ITERATION_NUM 31
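
/*
 * Run one training IP engine pass with the given pattern and direction and
 * fold the outcome into a per-subphy bitmap: bit n of *result is set to FAIL
 * (1) if subphy n did not reach the locked block status. The bitmap is only
 * ORed into, so callers are expected to zero *result beforehand (see
 * mv_ddr_dm_vw_get() below).
 */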
static int mv_ddr_tip_bist(enum hws_dir dir, u32 val, enum hws_pattern pattern, u32 cs, u32 *result)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum hws_training_ip_stat training_result;
	u16 *reg_map = ddr3_tip_get_mask_results_pup_reg_map();
	u32 max_subphy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 subphy, read_data;

	ddr3_tip_ip_training(0, ACCESS_TYPE_MULTICAST, 0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			     RESULT_PER_BYTE, HWS_CONTROL_ELEMENT_ADLL, HWS_LOW2HIGH, dir,
			     tm->if_act_mask, val, TIP_ITERATION_NUM, pattern, EDGE_FP,
			     CS_SINGLE, cs, &training_result);

	for (subphy = 0; subphy < max_subphy; subphy++) {
		ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, reg_map[subphy], &read_data, MASK_ALL_BITS);
		if (((read_data >> BLOCK_STATUS_OFFS) & BLOCK_STATUS_MASK) == BLOCK_STATUS_NOT_LOCKED)
			*result |= (FAIL << subphy);
	}

	return MV_OK;
}

struct interval {
	u8 *vector;
	u8 lendpnt;		/* interval's left endpoint */
	u8 rendpnt;		/* interval's right endpoint */
	u8 size;		/* interval's size */
	u8 lmarker;		/* left marker */
	u8 rmarker;		/* right marker */
	u8 pass_lendpnt;	/* left endpoint of internal pass interval */
	u8 pass_rendpnt;	/* right endpoint of internal pass interval */
};

static int interval_init(u8 *vector, u8 lendpnt, u8 rendpnt,
			 u8 lmarker, u8 rmarker, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	if (vector == NULL) {
		printf("%s: NULL vector pointer found\n", __func__);
		return MV_FAIL;
	}
	intrvl->vector = vector;

	if (lendpnt >= rendpnt) {
		printf("%s: incorrect lendpnt and/or rendpnt parameters found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lendpnt = lendpnt;
	intrvl->rendpnt = rendpnt;
	intrvl->size = rendpnt - lendpnt + 1;

	if ((lmarker < lendpnt) || (lmarker > rendpnt)) {
		printf("%s: incorrect lmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lmarker = lmarker;

	if ((rmarker < lmarker) || (rmarker > (intrvl->rendpnt + intrvl->size))) {
		printf("%s: incorrect rmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->rmarker = rmarker;

	return MV_OK;
}

static int interval_set(u8 pass_lendpnt, u8 pass_rendpnt, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	intrvl->pass_lendpnt = pass_lendpnt;
	intrvl->pass_rendpnt = pass_rendpnt;

	return MV_OK;
}
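
/*
 * Find the contiguous pass sub-interval that surrounds rmarker in a PASS/FAIL
 * vector treated as circular: indices are taken modulo the interval size, and
 * rmarker may lie up to one full period beyond rendpnt. A worked example,
 * assuming size 8 with PASS at indices 0-2 and 7 and FAIL elsewhere: an
 * rmarker of 9 hits index 1 (a pass), the search right stops at index 3 (a
 * fail), the search left stops at index 6 (a fail), and the reported pass
 * interval is [7, 10], i.e. taps 7, 0, 1 and 2.
 */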
static int interval_proc(struct interval *intrvl)
{
	int curr;
	int pass_lendpnt, pass_rendpnt;
	int lmt;
	int fcnt = 0, pcnt = 0;

	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	/* count fails and passes */
	curr = intrvl->lendpnt;
	while (curr <= intrvl->rendpnt) {
		if (intrvl->vector[curr] == PASS)
			pcnt++;
		else
			fcnt++;
		curr++;
	}

	/* check for all fail */
	if (fcnt == intrvl->size) {
		printf("%s: no pass found\n", __func__);
		return MV_FAIL;
	}

	/* check for all pass */
	if (pcnt == intrvl->size) {
		if (interval_set(intrvl->lendpnt, intrvl->rendpnt, intrvl) != MV_OK)
			return MV_FAIL;
		return MV_OK;
	}

	/* proceed with rmarker */
	curr = intrvl->rmarker;
	if (intrvl->vector[curr % intrvl->size] == PASS) { /* pass at rmarker */
		/* search for fail on right */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rendpnt + intrvl->size;
		else
			lmt = intrvl->rmarker + intrvl->size - 1;
		while ((curr <= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == PASS))
			curr++;
		if (curr > lmt) { /* fail not found */
			printf("%s: rmarker: fail following pass not found\n", __func__);
			return MV_FAIL;
		}
		/* fail found */
		pass_rendpnt = curr - 1;
	} else { /* fail at rmarker */
		/* search for pass on left */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rmarker - intrvl->size + 1;
		else
			lmt = intrvl->lendpnt;
		while ((curr >= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == FAIL))
			curr--;
		if (curr < lmt) { /* pass not found */
			printf("%s: rmarker: pass preceding fail not found\n", __func__);
			return MV_FAIL;
		}
		/* pass found */
		pass_rendpnt = curr;
	}

	/* search for fail on left */
	curr = pass_rendpnt;
	if (pass_rendpnt > intrvl->rendpnt)
		lmt = pass_rendpnt - intrvl->size + 1;
	else
		lmt = intrvl->lendpnt;
	while ((curr >= lmt) &&
	       (intrvl->vector[curr % intrvl->size] == PASS))
		curr--;
	if (curr < lmt) { /* fail not found */
		printf("%s: rmarker: fail preceding pass not found\n", __func__);
		return MV_FAIL;
	}
	/* fail found */
	pass_lendpnt = curr + 1;
	if (interval_set(pass_lendpnt, pass_rendpnt, intrvl) != MV_OK)
		return MV_FAIL;

	return MV_OK;
}

#define ADLL_TAPS_PER_PERIOD 64
int mv_ddr_dm_to_dq_diff_get(u8 vw_sphy_hi_lmt, u8 vw_sphy_lo_lmt, u8 *vw_vector,
			     int *vw_sphy_hi_diff, int *vw_sphy_lo_diff)
{
	struct interval intrvl;

	/* init interval structure */
	if (interval_init(vw_vector, 0, ADLL_TAPS_PER_PERIOD - 1,
			  vw_sphy_lo_lmt, vw_sphy_hi_lmt, &intrvl) != MV_OK)
		return MV_FAIL;

	/* find pass sub-interval */
	if (interval_proc(&intrvl) != MV_OK)
		return MV_FAIL;

	/* check for all pass */
	if ((intrvl.pass_rendpnt == intrvl.rendpnt) &&
	    (intrvl.pass_lendpnt == intrvl.lendpnt)) {
		printf("%s: no fail found\n", __func__);
		return MV_FAIL;
	}

	*vw_sphy_hi_diff = intrvl.pass_rendpnt - vw_sphy_hi_lmt;
	*vw_sphy_lo_diff = vw_sphy_lo_lmt - intrvl.pass_lendpnt;

	return MV_OK;
}

static int mv_ddr_bist_tx(enum hws_access_type access_type)
{
	mv_ddr_odpg_done_clr();

	ddr3_tip_bist_operation(0, access_type, 0, BIST_START);

	if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
		return MV_FAIL;

	ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);

	return MV_OK;
}
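
/*
 * The prepare step below programs the ODPG once per BIST run: for a write it
 * uses the pattern's tx burst size, a fixed inter-burst command delay and TX
 * mode, while for a read it zeroes both and selects RX mode. Plain
 * PATTERN_00/PATTERN_FF data goes through the regular ODPG pattern loader;
 * any other pattern is loaded as a data-mask pattern in the requested direct
 * or inverse direction.
 */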
/* prepare odpg for bist operation */
#define WR_OP_ODPG_DATA_CMD_BURST_DLY 2
static int mv_ddr_odpg_bist_prepare(enum hws_pattern pattern, enum hws_access_type access_type,
				    enum hws_dir dir, enum hws_stress_jump stress_jump_addr,
				    enum hws_pattern_duration duration, u32 offset, u32 cs,
				    u32 pattern_addr_len, enum dm_direction dm_dir)
{
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 tx_burst_size;
	u32 burst_delay;
	u32 rd_mode;

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ?
			  (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
			  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

	if (pattern == PATTERN_00 || pattern == PATTERN_FF)
		ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
	else
		mv_ddr_load_dm_pattern_to_odpg(access_type, pattern, dm_dir);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_len, MASK_ALL_BITS);
	if (dir == OPER_WRITE) {
		tx_burst_size = pattern_table[pattern].tx_burst_size;
		burst_delay = WR_OP_ODPG_DATA_CMD_BURST_DLY;
		rd_mode = ODPG_MODE_TX;
	} else {
		tx_burst_size = 0;
		burst_delay = 0;
		rd_mode = ODPG_MODE_RX;
	}
	ddr3_tip_configure_odpg(0, access_type, 0, dir, pattern_table[pattern].num_of_phases_tx,
				tx_burst_size, pattern_table[pattern].num_of_phases_rx, burst_delay,
				rd_mode, cs, stress_jump_addr, duration);

	return MV_OK;
}

#define BYTES_PER_BURST_64BIT 0x20
#define BYTES_PER_BURST_32BIT 0x10
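/*
 * Build the data-mask valid-window vector for every subphy across one ADLL
 * period: fill memory with the base pattern (one burst per tap), rewrite it
 * with the vref pattern while sweeping the tx DQS ADLL over all
 * ADLL_TAPS_PER_PERIOD positions, then read each burst back and mark the
 * failing subphys. vw_vector holds ADLL_TAPS_PER_PERIOD entries per subphy
 * and is only ORed into, so it should be passed in zeroed; the tx and rx DQS
 * ADLL values are saved on entry and restored before returning.
 */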
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 adll_tap;
	u32 wr_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 rd_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 subphy;
	u32 subphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 odpg_addr = 0x0;
	u32 result;
	u32 idx;
	/* burst length in bytes */
	u32 burst_len = (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ?
			 BYTES_PER_BURST_64BIT : BYTES_PER_BURST_32BIT);

	/* save dqs values to restore after algorithm's run */
	ddr3_tip_read_adll_value(0, wr_ctrl_adll, CTX_PHY_REG(cs), MASK_ALL_BITS);
	ddr3_tip_read_adll_value(0, rd_ctrl_adll, CRX_PHY_REG(cs), MASK_ALL_BITS);

	/* fill memory with base pattern */
	ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);
	mv_ddr_odpg_bist_prepare(pattern, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
				 bist_offset, cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);

		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* fill memory with vref pattern to increment addr using odpg bist */
	mv_ddr_odpg_bist_prepare(PATTERN_VREF, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
				 bist_offset, cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_MULTICAST, 0,
				   DDR_PHY_DATA, CTX_PHY_REG(cs), adll_tap);
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* restore subphy's tx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST,
				   subphy, DDR_PHY_DATA, CTX_PHY_REG(cs),
				   wr_ctrl_adll[subphy]);
	}
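
	/*
	 * Fold each read-back result into the flat valid-window vector:
	 * entry ADLL_TAPS_PER_PERIOD * subphy + adll_tap is set to 1 on
	 * failure, e.g. tap 5 of subphy 2 lands at entry 133 for a 64-tap
	 * period.
	 */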
	/* read and validate bist (comparing with the base pattern) */
	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		result = 0;
		odpg_addr = adll_tap * burst_len;
		/* change addr to fit write */
		mv_ddr_pattern_start_addr_set(pattern_table, pattern, odpg_addr);
		mv_ddr_tip_bist(OPER_READ, 0, pattern, 0, &result);
		for (subphy = 0; subphy < subphy_max; subphy++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
			idx = ADLL_TAPS_PER_PERIOD * subphy + adll_tap;
			vw_vector[idx] |= ((result >> subphy) & 0x1);
		}
	}

	/* restore subphy's rx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST,
				   subphy, DDR_PHY_DATA, CRX_PHY_REG(cs),
				   rd_ctrl_adll[subphy]);
	}

	return MV_OK;
}