xref: /openbmc/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.c (revision 2b4ffbf6b4944a0b3125fd2c9c0ba3568264367a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Marvell International Ltd. and its affiliates
4  */
5 
6 #include "ddr3_init.h"
7 
8 #include "mv_ddr_sys_env_lib.h"
9 
10 #define DDR_INTERFACES_NUM		1
11 #define DDR_INTERFACE_OCTETS_NUM	5
12 
13 /*
14  * 1. L2 filter should be set at binary header to 0xD000000,
15  *    to avoid conflict with internal register IO.
16  * 2. U-Boot modifies internal registers base to 0xf100000,
17  *    and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
18  */
19 #define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xC0000000 /* temporary limit l2 filter to 3gb (LSP issue) */
20 #define ADDRESS_FILTERING_END_REGISTER	0x8c04
21 
22 #define DYNAMIC_CS_SIZE_CONFIG
23 #define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
24 
25 /* Termal Sensor Registers */
26 #define TSEN_CONTROL_LSB_REG		0xE4070
27 #define TSEN_CONTROL_LSB_TC_TRIM_OFFSET	0
28 #define TSEN_CONTROL_LSB_TC_TRIM_MASK	(0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
29 #define TSEN_CONTROL_MSB_REG		0xE4074
30 #define TSEN_CONTROL_MSB_RST_OFFSET	8
31 #define TSEN_CONTROL_MSB_RST_MASK	(0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
32 #define TSEN_STATUS_REG			0xe4078
33 #define TSEN_STATUS_READOUT_VALID_OFFSET	10
34 #define TSEN_STATUS_READOUT_VALID_MASK	(0x1 <<				\
35 					 TSEN_STATUS_READOUT_VALID_OFFSET)
36 #define TSEN_STATUS_TEMP_OUT_OFFSET	0
37 #define TSEN_STATUS_TEMP_OUT_MASK	(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
38 
/*
 * Init values for the DLB (Dynamic Load Balancing) unit registers;
 * consumed via sys_env_dlb_config_ptr_get(). The list is terminated
 * by an all-zero {reg, val} entry.
 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}	/* terminator */
};
55 
56 static struct dlb_config *sys_env_dlb_config_ptr_get(void)
57 {
58 	return &ddr3_dlb_config_table[0];
59 }
60 
/*
 * Per-frequency bandwidth parameter, indexed by enum hws_ddr_freq;
 * reported to the training IP through hws_tip_freq_config_info
 * (see ddr3_tip_a38x_get_freq_config()).
 */
static u8 a38x_bw_per_freq[DDR_FREQ_LAST] = {
	0x3,			/* DDR_FREQ_100 */
	0x4,			/* DDR_FREQ_400 */
	0x4,			/* DDR_FREQ_533 */
	0x5,			/* DDR_FREQ_667 */
	0x5,			/* DDR_FREQ_800 */
	0x5,			/* DDR_FREQ_933 */
	0x5,			/* DDR_FREQ_1066 */
	0x3,			/* DDR_FREQ_311 */
	0x3,			/* DDR_FREQ_333 */
	0x4,			/* DDR_FREQ_467 */
	0x5,			/* DDR_FREQ_850 */
	0x5,			/* DDR_FREQ_600 */
	0x3,			/* DDR_FREQ_300 */
	0x5,			/* DDR_FREQ_900 */
	0x3,			/* DDR_FREQ_360 */
	0x5			/* DDR_FREQ_1000 */
};
79 
/*
 * Per-frequency rate parameter, indexed by enum hws_ddr_freq;
 * reported to the training IP through hws_tip_freq_config_info
 * (see ddr3_tip_a38x_get_freq_config()).
 */
static u8 a38x_rate_per_freq[DDR_FREQ_LAST] = {
	0x1,			/* DDR_FREQ_100 */
	0x2,			/* DDR_FREQ_400 */
	0x2,			/* DDR_FREQ_533 */
	0x2,			/* DDR_FREQ_667 */
	0x2,			/* DDR_FREQ_800 */
	0x3,			/* DDR_FREQ_933 */
	0x3,			/* DDR_FREQ_1066 */
	0x1,			/* DDR_FREQ_311 */
	0x1,			/* DDR_FREQ_333 */
	0x2,			/* DDR_FREQ_467 */
	0x2,			/* DDR_FREQ_850 */
	0x2,			/* DDR_FREQ_600 */
	0x1,			/* DDR_FREQ_300 */
	0x2,			/* DDR_FREQ_900 */
	0x1,			/* DDR_FREQ_360 */
	0x2			/* DDR_FREQ_1000 */
};
98 
/*
 * VCO frequency per sample-at-reset clock-select code, 25 MHz refclk case.
 * Indexed by the SAR CPU/DDR clock-select field; used by
 * ddr3_tip_a38x_set_divider() to compute the DDR clock divider
 * (divider = vco_freq / freq_val[frequency], so units match freq_val).
 */
static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,			/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,			/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};
132 
/*
 * VCO frequency per sample-at-reset clock-select code, 40 MHz refclk case.
 * Same indexing and usage as the 25 MHz table above.
 */
static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	800,			/* 0x3 */
	1066,
	1066,			/* 0x5 */
	1200,
	2400,
	1332,
	1332,
	1500,			/* 10 */
	1600,			/* 0xB */
	1600,
	1600,
	1700,
	1560,			/* 0xF */
	1866,
	1866,
	1800,
	2000,
	2000,			/* 20 */
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	1800			/* 30 - 0x1E */
};
166 
167 
168 static u32 async_mode_at_tf;
169 
170 static u32 dq_bit_map_2_phy_pin[] = {
171 	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
172 	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
173 	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
174 	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
175 	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
176 };
177 
/* Memory scrubbing hook: intentionally a no-op on this platform. */
void mv_ddr_mem_scrubbing(void)
{
}
181 
182 static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
183 				     enum hws_ddr_freq freq);
184 
/*
 * Read temperature TJ value
 *
 * Performs a one-time thermal sensor reset (also applying the TC trim
 * default required by errata #132698), waits for the readout to become
 * valid, then converts the raw 10-bit reading to degrees Celsius using
 * fixed-point arithmetic. Returns 0 if the sensor readout is not valid.
 */
static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;

	/* Initiates TSEN hardware reset once */
	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
		/* set Tsen Tc Trim to correct default value (errata #132698) */
		reg = reg_read(TSEN_CONTROL_LSB_REG);
		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
		reg_write(TSEN_CONTROL_LSB_REG, reg);
	}
	/* allow the sensor to settle after a (possible) reset */
	mdelay(10);

	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}

	/* extract the raw 10-bit temperature field */
	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	/* linear raw-to-Celsius conversion, kept in integer arithmetic */
	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
214 
215 /*
216  * Name:     ddr3_tip_a38x_get_freq_config.
217  * Desc:
218  * Args:
219  * Notes:
220  * Returns:  MV_OK if success, other error code if fail.
221  */
222 static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
223 				  struct hws_tip_freq_config_info
224 				  *freq_config_info)
225 {
226 	if (a38x_bw_per_freq[freq] == 0xff)
227 		return MV_NOT_SUPPORTED;
228 
229 	if (freq_config_info == NULL)
230 		return MV_BAD_PARAM;
231 
232 	freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
233 	freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
234 	freq_config_info->is_supported = 1;
235 
236 	return MV_OK;
237 }
238 
239 static void dunit_read(u32 addr, u32 mask, u32 *data)
240 {
241 	*data = reg_read(addr) & mask;
242 }
243 
244 static void dunit_write(u32 addr, u32 mask, u32 data)
245 {
246 	u32 reg_val = data;
247 
248 	if (mask != MASK_ALL_BITS) {
249 		dunit_read(addr, MASK_ALL_BITS, &reg_val);
250 		reg_val &= (~mask);
251 		reg_val |= (data & mask);
252 	}
253 
254 	reg_write(addr, reg_val);
255 }
256 
257 #define ODPG_ENABLE_REG				0x186d4
258 #define ODPG_EN_OFFS				0
259 #define ODPG_EN_MASK				0x1
260 #define ODPG_EN_ENA				1
261 #define ODPG_EN_DONE				0
262 #define ODPG_DIS_OFFS				8
263 #define ODPG_DIS_MASK				0x1
264 #define ODPG_DIS_DIS				1
265 void mv_ddr_odpg_enable(void)
266 {
267 	dunit_write(ODPG_ENABLE_REG,
268 		    ODPG_EN_MASK << ODPG_EN_OFFS,
269 		    ODPG_EN_ENA << ODPG_EN_OFFS);
270 }
271 
272 void mv_ddr_odpg_disable(void)
273 {
274 	dunit_write(ODPG_ENABLE_REG,
275 		    ODPG_DIS_MASK << ODPG_DIS_OFFS,
276 		    ODPG_DIS_DIS << ODPG_DIS_OFFS);
277 }
278 
/* ODPG done-flag clear hook: intentionally a no-op on this platform. */
void mv_ddr_odpg_done_clr(void)
{
}
283 
284 int mv_ddr_is_odpg_done(u32 count)
285 {
286 	u32 i, data;
287 
288 	for (i = 0; i < count; i++) {
289 		dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
290 		if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
291 		     ODPG_EN_DONE)
292 			break;
293 	}
294 
295 	if (i >= count) {
296 		printf("%s: timeout\n", __func__);
297 		return MV_FAIL;
298 	}
299 
300 	return MV_OK;
301 }
302 
/* Trigger the DDR training machine via the global control/status register. */
void mv_ddr_training_enable(void)
{
	dunit_write(GLOB_CTRL_STATUS_REG,
		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
}
309 
310 #define DRAM_INIT_CTRL_STATUS_REG	0x18488
311 #define TRAINING_TRIGGER_OFFS		0
312 #define TRAINING_TRIGGER_MASK		0x1
313 #define TRAINING_TRIGGER_ENA		1
314 #define TRAINING_DONE_OFFS		1
315 #define TRAINING_DONE_MASK		0x1
316 #define TRAINING_DONE_DONE		1
317 #define TRAINING_DONE_NOT_DONE		0
318 #define TRAINING_RESULT_OFFS		2
319 #define TRAINING_RESULT_MASK		0x1
320 #define TRAINING_RESULT_PASS		0
321 #define TRAINING_RESULT_FAIL		1
322 int mv_ddr_is_training_done(u32 count, u32 *result)
323 {
324 	u32 i, data;
325 
326 	if (result == NULL) {
327 		printf("%s: NULL result pointer found\n", __func__);
328 		return MV_FAIL;
329 	}
330 
331 	for (i = 0; i < count; i++) {
332 		dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
333 		if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
334 		     TRAINING_DONE_DONE)
335 			break;
336 	}
337 
338 	if (i >= count) {
339 		printf("%s: timeout\n", __func__);
340 		return MV_FAIL;
341 	}
342 
343 	*result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;
344 
345 	return MV_OK;
346 }
347 
/* Fixed DM pad value for this platform; units/meaning defined by the
 * training IP callers — TODO confirm.
 */
#define DM_PAD	10
/* Return the platform's DM pad value (constant DM_PAD). */
u32 mv_ddr_dm_pad_get(void)
{
	return DM_PAD;
}
353 
354 /*
355  * Name:     ddr3_tip_a38x_select_ddr_controller.
356  * Desc:     Enable/Disable access to Marvell's server.
357  * Args:     dev_num     - device number
358  *           enable        - whether to enable or disable the server
359  * Notes:
360  * Returns:  MV_OK if success, other error code if fail.
361  */
362 static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
363 {
364 	u32 reg;
365 
366 	reg = reg_read(DUAL_DUNIT_CFG_REG);
367 
368 	if (enable)
369 		reg |= (1 << 6);
370 	else
371 		reg &= ~(1 << 6);
372 
373 	reg_write(DUAL_DUNIT_CFG_REG, reg);
374 
375 	return MV_OK;
376 }
377 
378 static u8 ddr3_tip_clock_mode(u32 frequency)
379 {
380 	if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400))
381 		return 1;
382 
383 	return 2;
384 }
385 
/*
 * Decode the DDR frequency from the sample-at-reset (SAR) configuration.
 * The decode table depends on whether the reference clock strap selects
 * 25 MHz or 40 MHz. Some unsupported SAR codes are mapped (with a
 * warning) to the nearest supported frequency via deliberate switch
 * fallthrough; unknown codes yield MV_NOT_SUPPORTED and *freq = 0.
 */
static int mv_ddr_sar_freq_get(int dev_num, enum hws_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x1:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x0:
			*freq = DDR_FREQ_333;
			break;
		case 0x3:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x2:
			*freq = DDR_FREQ_400;
			break;
		case 0xd:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x4:
			*freq = DDR_FREQ_533;
			break;
		case 0x6:
			*freq = DDR_FREQ_600;
			break;
		case 0x11:
		case 0x14:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0x8:
			*freq = DDR_FREQ_667;
			break;
		case 0x15:
		case 0x1b:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
					      reg));
			/* fallthrough */
		case 0xc:
			*freq = DDR_FREQ_800;
			break;
		case 0x10:
			*freq = DDR_FREQ_933;
			break;
		case 0x12:
			*freq = DDR_FREQ_900;
			break;
		case 0x13:
			*freq = DDR_FREQ_933;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			*freq = DDR_FREQ_400;
			break;
		case 0x5:
			*freq = DDR_FREQ_533;
			break;
		case 0xb:
			*freq = DDR_FREQ_800;
			break;
		case 0x1e:
			*freq = DDR_FREQ_900;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
479 
/*
 * Decode the "medium" (intermediate training) frequency from the
 * sample-at-reset configuration. For lower target frequencies the
 * medium frequency equals the target frequency so that PBS can run at
 * that rate; for higher targets a reduced frequency is chosen.
 * Returns MV_NOT_SUPPORTED (and *freq = 0) for unknown SAR codes.
 */
static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x0:
		case 0x1:
			/* Medium is same as TF to run PBS in this freq */
			*freq = DDR_FREQ_333;
			break;
		case 0x2:
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = DDR_FREQ_400;
			break;
		case 0x4:
		case 0xd:
			/* Medium is same as TF to run PBS in this freq */
			*freq = DDR_FREQ_533;
			break;
		case 0x8:
		case 0x10:
		case 0x11:
		case 0x14:
			*freq = DDR_FREQ_333;
			break;
		case 0xc:
		case 0x15:
		case 0x1b:
			*freq = DDR_FREQ_400;
			break;
		case 0x6:
			*freq = DDR_FREQ_300;
			break;
		case 0x12:
			*freq = DDR_FREQ_360;
			break;
		case 0x13:
			*freq = DDR_FREQ_400;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = DDR_FREQ_400;
			break;
		case 0x5:
			/* Medium is same as TF to run PBS in this freq */
			*freq = DDR_FREQ_533;
			break;
		case 0xb:
			*freq = DDR_FREQ_400;
			break;
		case 0x1e:
			*freq = DDR_FREQ_360;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
556 
/* Fill in the SoC device id (A39x: 0x6900, A38x: 0x6800) and the
 * current ck_delay setting for the training IP.
 */
static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
#if defined(CONFIG_ARMADA_39X)
	info_ptr->device_id = 0x6900;
#else
	info_ptr->device_id = 0x6800;
#endif
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}
568 
569 /* check indirect access to phy register file completed */
570 static int is_prfa_done(void)
571 {
572 	u32 reg_val;
573 	u32 iter = 0;
574 
575 	do {
576 		if (iter++ > MAX_POLLING_ITERATIONS) {
577 			printf("error: %s: polling timeout\n", __func__);
578 			return MV_FAIL;
579 		}
580 		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
581 		reg_val >>= PRFA_REQ_OFFS;
582 		reg_val &= PRFA_REQ_MASK;
583 	} while (reg_val == PRFA_REQ_ENA); /* request pending */
584 
585 	return MV_OK;
586 }
587 
/* write to phy register thru indirect access */
static int prfa_write(enum hws_access_type phy_access, u32 phy,
		      enum hws_ddr_phy phy_type, u32 addr,
		      u32 data, enum hws_operation op_type)
{
	/*
	 * Pack the request into the PRFA register: data, low register
	 * number, PHY (pup) number, control/data select, unicast/broadcast
	 * select, high register-number bits (addr >> 6), and the
	 * read/write operation type.
	 */
	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
	/* write the request first, then set the request-enable bit */
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);

	/* polling for prfa request completion */
	if (is_prfa_done() != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
610 
/* read from phy register thru indirect access */
static int prfa_read(enum hws_access_type phy_access, u32 phy,
		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 i, reg_val;

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		/* multicast: issue one read per active PHY; 'data' must be
		 * an array with at least max_phy entries
		 */
		for (i = 0; i < max_phy; i++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
			/* a read is a PRFA write with OPERATION_READ */
			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
				return MV_FAIL;
			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
		}
	} else {
		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
			return MV_FAIL;
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
	}

	return MV_OK;
}
636 
/*
 * Register the A38x platform callbacks with the training IP, set device
 * attributes, and initialize the global training parameters
 * (ca_delay, delay_enable, dfs_low_freq, calibration_update_control,
 * mode_2t, medium_freq). board_id is currently unused.
 */
static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;

	/* new read leveling version */
	config_func.mv_ddr_dunit_read = dunit_read;
	config_func.mv_ddr_dunit_write = dunit_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
	config_func.tip_external_read = ddr3_tip_ext_read;
	config_func.tip_external_write = ddr3_tip_ext_write;
	config_func.mv_ddr_phy_read = prfa_read;
	config_func.mv_ddr_phy_write = prfa_write;

	ddr3_tip_init_config_func(dev_num, &config_func);

	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

	/* set device attributes*/
	ddr3_tip_dev_attr_init(dev_num);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
#ifdef CONFIG_ARMADA_39X
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 1);
#else
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
#endif

	ca_delay = 0;
	delay_enable = 1;
	dfs_low_freq = DFS_LOW_FREQ_VALUE;
	calibration_update_control = 1;

#ifdef CONFIG_ARMADA_38X
	/* For a38x only, change to 2T mode to resolve low freq instability */
	mode_2t = 1;
#endif

	/* derive the medium training frequency from sample-at-reset */
	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}
686 
/*
 * Select which training stages run (global mask_tune_func) based on the
 * target frequency of interface 0, and decide whether the read-leveling
 * mid-frequency workaround is needed. Supplementary stages are removed
 * when ECC is enabled.
 */
static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum hws_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	/* default: full stage set, including the target-frequency stages */
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	/* low target frequencies train directly at the target frequency */
	if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (1 == ddr3_if_ecc_enabled()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	return MV_OK;
}
729 
/* function: mv_ddr_set_calib_controller
 * this function sets the controller which will control
 * the calibration cycle in the end of the training.
 * 1 - internal controller
 * 2 - external controller
 * On this platform the internal controller (CAL_UPDATE_CTRL_INT) is used.
 */
void mv_ddr_set_calib_controller(void)
{
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}
740 
/*
 * Program the DDR clock for the requested frequency on interface 0.
 * In async mode (target frequency differs from SAR and is above 400)
 * a fixed per-frequency PLL value is written; otherwise the divider is
 * derived from the SAR-selected VCO frequency and loaded through the
 * CPU PLL clock-divider sequence. Finally the d-unit training clock and
 * 1:1/2:1 ratio bits are set. Returns MV_BAD_PARAM for if_id != 0.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum hws_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;

	/* only interface 0 exists on A38x */
	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	/* pick the VCO table matching the 25/40 MHz refclk strap */
	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq_val[frequency];
	else
		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq_val[frequency];

	if ((async_mode_at_tf == 1) && (freq_val[frequency] > 400)) {
		/* Set async mode */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */
		mdelay(5);

		/* Set KNL values */
		switch (frequency) {
#ifdef CONFIG_DDR3
		case DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
#endif
		case DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
#ifdef CONFIG_DDR3
		case DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
#endif
		case DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* set DDR_FREQ_667 as default */
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x1f);

		/* cpupll_clkdiv_reload_smooth */
		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		/* cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		/* write the divider */
		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		/* set cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), (1 << 8));

		/* undet cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), 0x0);

		/* clear cpupll_clkdiv_reload_force */
		dunit_write(0xe4260, (0xff << 8), 0x0);

		/* clear cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), 0x0);

		/* clear cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* Dunit training clock + 1:1/2:1 mode */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}
855 
856 /*
857  * external read from memory
858  */
859 int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
860 		      u32 num_of_bursts, u32 *data)
861 {
862 	u32 burst_num;
863 
864 	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
865 		data[burst_num] = readl(reg_addr + 4 * burst_num);
866 
867 	return MV_OK;
868 }
869 
870 /*
871  * external write to memory
872  */
873 int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
874 		       u32 num_of_bursts, u32 *data) {
875 	u32 burst_num;
876 
877 	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
878 		writel(data[burst_num], reg_addr + 4 * burst_num);
879 
880 	return MV_OK;
881 }
882 
/*
 * First-stage DDR init: register platform callbacks and globals via
 * mv_ddr_sw_db_init(), and enable async mode when the topology's target
 * frequency differs from the sample-at-reset frequency.
 */
int mv_ddr_early_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* FIXME: change this configuration per ddr type
	 * configure a380 and a390 to work with receiver odt timing
	 * the odt_config is defined:
	 * '1' in ddr4
	 * '0' in ddr3
	 * here the parameter is run over in ddr4 and ddr3 to '1' (in ddr4 the default is '1')
	 * to configure the odt to work with timing restrictions
	 */

	mv_ddr_sw_db_init(0, 0);

	/* a non-SAR target frequency requires async PLL mode at TF */
	if (tm->interface_params[0].memory_freq != DDR_FREQ_SAR)
		async_mode_at_tf = 1;

	return MV_OK;
}
903 
/* Second-stage early init: program the training-stage mask only. */
int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}
910 
/* Pre-training fixup hook: nothing to fix up on this platform. */
int mv_ddr_pre_training_fixup(void)
{
	return 0;
}
915 
/* Post-training fixup hook: nothing to fix up on this platform. */
int mv_ddr_post_training_fixup(void)
{
	return 0;
}
920 
/* Post-algorithm hook: nothing to do on this platform. */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
925 
/*
 * Post-training silicon setup: when the topology selects 16-bit DRAM
 * mode, clear the half-bus-width bit (0x8000) in the SDRAM config
 * register on interface 0.
 */
int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}
939 
940 u32 mv_ddr_init_freq_get(void)
941 {
942 	enum hws_ddr_freq freq;
943 
944 	mv_ddr_sar_freq_get(0, &freq);
945 
946 	return freq;
947 }
948 
949 static u32 ddr3_get_bus_width(void)
950 {
951 	u32 bus_width;
952 
953 	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
954 		BUS_IN_USE_OFFS;
955 
956 	return (bus_width == 0) ? 16 : 32;
957 }
958 
959 static u32 ddr3_get_device_width(u32 cs)
960 {
961 	u32 device_width;
962 
963 	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
964 			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
965 			CS_STRUCT_OFFS(cs);
966 
967 	return (device_width == 0) ? 8 : 16;
968 }
969 
970 static u32 ddr3_get_device_size(u32 cs)
971 {
972 	u32 device_size_low, device_size_high, device_size;
973 	u32 data, cs_low_offset, cs_high_offset;
974 
975 	cs_low_offset = CS_SIZE_OFFS(cs);
976 	cs_high_offset = CS_SIZE_HIGH_OFFS(cs);
977 
978 	data = reg_read(SDRAM_ADDR_CTRL_REG);
979 	device_size_low = (data >> cs_low_offset) & 0x3;
980 	device_size_high = (data >> cs_high_offset) & 0x1;
981 
982 	device_size = device_size_low | (device_size_high << 2);
983 
984 	switch (device_size) {
985 	case 0:
986 		return 2048;
987 	case 2:
988 		return 512;
989 	case 3:
990 		return 1024;
991 	case 4:
992 		return 4096;
993 	case 5:
994 		return 8192;
995 	case 1:
996 	default:
997 		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
998 		/* zeroes mem size in ddr3_calc_mem_cs_size */
999 		return 0;
1000 	}
1001 }
1002 
/*
 * Compute the memory size in bytes behind chip select 'cs' and store it
 * in *cs_size. Returns MV_BAD_VALUE if the computed size (in MiB) is
 * outside the sane 128..4096 range.
 */
static int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
{
	u32 cs_mem_size;

	/* Calculate in MiB */
	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		       ddr3_get_device_size(cs)) / 8;

	/*
	 * Multiple controller bus width, 2x for 64 bit
	 * (SoC controller may be 32 or 64 bit,
	 * so bit 15 in 0x1400, that means if whole bus used or only half,
	 * have a different meaning)
	 */
	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	*cs_size = cs_mem_size << 20; /* write cs size in bytes */

	return MV_OK;
}
1028 
/*
 * Open a fast-path (Mbus bypass) window per enabled chip select and set
 * the L2 address filtering register to the total memory size, capped at
 * L2_FILTER_FOR_MAX_MEMORY_SIZE when the sum would exceed the limit.
 * cs_ena is a bitmask of enabled chip selects.
 */
static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size = 0;
	uint64_t mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* get CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
				return MV_FAIL;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if number of address pins doesn't allow to use max
			 * mem size that is defined in topology
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16bit mem device can be twice more - no need
				 * in less significant pin
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to %x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/*Open fast path Window */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G the summ may
			 * be more than 32 bit word,
			 * so to estimate the result divide mem_total_size and
			 * cs_mem_size by 0x10000 (it is equal to >> 16)
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;
			/* if the sum less than 2 G - calculate the value */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else	/* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}
1109 
/*
 * Restore the XBAR window configuration saved by
 * ddr3_save_and_set_training_windows() from 'win', then switch to the
 * fast-path windows (dynamically sized per CS when
 * DYNAMIC_CS_SIZE_CONFIG is defined, otherwise a fixed 0.5G window).
 * 'ddr_type' is used only for the log message.
 */
static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	u32 reg, cs;
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path Window to - 0.5G */
	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
#endif

	return MV_OK;
}
1144 
1145 static int ddr3_save_and_set_training_windows(u32 *win)
1146 {
1147 	u32 cs_ena;
1148 	u32 reg, tmp_count, cs, ui;
1149 	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
1150 	u32 num_of_win_regs, win_jump_index;
1151 	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
1152 	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
1153 	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
1154 	win_jump_index = 0x10;
1155 	num_of_win_regs = 16;
1156 	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
1157 
1158 #ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
1159 	/*
1160 	 * Disable L2 filtering during DDR training
1161 	 * (when Cross Bar window is open)
1162 	 */
1163 	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
1164 #endif
1165 
1166 	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;
1167 
1168 	/* Close XBAR Window 19 - Not needed */
1169 	/* {0x000200e8}  -   Open Mbus Window - 2G */
1170 	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);
1171 
1172 	/* Save XBAR Windows 4-19 init configurations */
1173 	for (ui = 0; ui < num_of_win_regs; ui++)
1174 		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);
1175 
1176 	/* Open XBAR Windows 4-7 or 16-19 for other CS */
1177 	reg = 0;
1178 	tmp_count = 0;
1179 	for (cs = 0; cs < MAX_CS_NUM; cs++) {
1180 		if (cs_ena & (1 << cs)) {
1181 			switch (cs) {
1182 			case 0:
1183 				reg = 0x0e00;
1184 				break;
1185 			case 1:
1186 				reg = 0x0d00;
1187 				break;
1188 			case 2:
1189 				reg = 0x0b00;
1190 				break;
1191 			case 3:
1192 				reg = 0x0700;
1193 				break;
1194 			}
1195 			reg |= (1 << 0);
1196 			reg |= (SDRAM_CS_SIZE & 0xffff0000);
1197 
1198 			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
1199 				  reg);
1200 			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
1201 			       0xffff0000);
1202 			reg_write(win_base_reg + win_jump_index * tmp_count,
1203 				  reg);
1204 
1205 			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
1206 				reg_write(win_remap_reg +
1207 					  win_jump_index * tmp_count, 0);
1208 
1209 			tmp_count++;
1210 		}
1211 	}
1212 
1213 	return MV_OK;
1214 }
1215 
/*
 * XBAR window control registers saved by
 * ddr3_save_and_set_training_windows() and restored by
 * ddr3_restore_and_set_final_windows() after training (16 registers).
 */
static u32 win[16];
1217 
1218 int mv_ddr_pre_training_soc_config(const char *ddr_type)
1219 {
1220 	u32 soc_num;
1221 	u32 reg_val;
1222 
1223 	/* Switching CPU to MRVL ID */
1224 	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
1225 		SAR1_CPU_CORE_OFFSET;
1226 	switch (soc_num) {
1227 	case 0x3:
1228 		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
1229 		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
1230 		/* fallthrough */
1231 	case 0x1:
1232 		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
1233 		/* fallthrough */
1234 	case 0x0:
1235 		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
1236 		/* fallthrough */
1237 	default:
1238 		break;
1239 	}
1240 
1241 	/*
1242 	 * Set DRAM Reset Mask in case detected GPIO indication of wakeup from
1243 	 * suspend i.e the DRAM values will not be overwritten / reset when
1244 	 * waking from suspend
1245 	 */
1246 	if (mv_ddr_sys_env_suspend_wakeup_check() ==
1247 	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
1248 		reg_bit_set(SDRAM_INIT_CTRL_REG,
1249 			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
1250 	}
1251 
1252 	/* Check if DRAM is already initialized  */
1253 	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
1254 	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
1255 		printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
1256 		return MV_OK;
1257 	}
1258 
1259 	/* Fix read ready phases for all SOC in reg 0x15c8 */
1260 	reg_val = reg_read(TRAINING_DBG_3_REG);
1261 
1262 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
1263 	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));	/* phase 0 */
1264 
1265 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
1266 	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));	/* phase 1 */
1267 
1268 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
1269 	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));	/* phase 3 */
1270 
1271 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
1272 	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));	/* phase 4 */
1273 
1274 	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
1275 	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));	/* phase 5 */
1276 
1277 	reg_write(TRAINING_DBG_3_REG, reg_val);
1278 
1279 	/*
1280 	 * Axi_bresp_mode[8] = Compliant,
1281 	 * Axi_addr_decode_cntrl[11] = Internal,
1282 	 * Axi_data_bus_width[0] = 128bit
1283 	 * */
1284 	/* 0x14a8 - AXI Control Register */
1285 	reg_write(AXI_CTRL_REG, 0);
1286 
1287 	/*
1288 	 * Stage 2 - Training Values Setup
1289 	 */
1290 	/* Set X-BAR windows for the training sequence */
1291 	ddr3_save_and_set_training_windows(win);
1292 
1293 	return MV_OK;
1294 }
1295 
1296 static int ddr3_new_tip_dlb_config(void)
1297 {
1298 	u32 reg, i = 0;
1299 	struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
1300 
1301 	/* Write the configuration */
1302 	while (config_table_ptr[i].reg_addr != 0) {
1303 		reg_write(config_table_ptr[i].reg_addr,
1304 			  config_table_ptr[i].reg_data);
1305 		i++;
1306 	}
1307 
1308 
1309 	/* Enable DLB */
1310 	reg = reg_read(DLB_CTRL_REG);
1311 	reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
1312 	       ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
1313 	       ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
1314 	       ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
1315 	       ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1316 
1317 	reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
1318 	       (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
1319 	       (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
1320 	       (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
1321 	       (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1322 
1323 	reg_write(DLB_CTRL_REG, reg);
1324 
1325 	return MV_OK;
1326 }
1327 
1328 int mv_ddr_post_training_soc_config(const char *ddr_type)
1329 {
1330 	u32 reg_val;
1331 
1332 	/* Restore and set windows */
1333 	ddr3_restore_and_set_final_windows(win, ddr_type);
1334 
1335 	/* Update DRAM init indication in bootROM register */
1336 	reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
1337 	reg_write(REG_BOOTROM_ROUTINE_ADDR,
1338 		  reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
1339 
1340 	/* DLB config */
1341 	ddr3_new_tip_dlb_config();
1342 
1343 	return MV_OK;
1344 }
1345 
1346 void mv_ddr_mc_config(void)
1347 {
1348 	/* Memory controller initializations */
1349 	struct init_cntr_param init_param;
1350 	int status;
1351 
1352 	init_param.do_mrs_phy = 1;
1353 	init_param.is_ctrl64_bit = 0;
1354 	init_param.init_phy = 1;
1355 	init_param.msys_init = 1;
1356 	status = hws_ddr3_tip_init_controller(0, &init_param);
1357 	if (status != MV_OK)
1358 		printf("DDR3 init controller - FAILED 0x%x\n", status);
1359 
1360 	status = mv_ddr_mc_init();
1361 	if (status != MV_OK)
1362 		printf("DDR3 init_sequence - FAILED 0x%x\n", status);
1363 }
/* function: mv_ddr_mc_init
 * this function enables the dunit after init controller configuration
 *
 * Triggers the SDRAM init sequence on device 0. CHECK_STATUS()
 * presumably bails out with the callee's error code on failure --
 * see its definition in the TIP headers to confirm.
 *
 * returns: MV_OK on success
 */
int mv_ddr_mc_init(void)
{
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}
1373 
/* function: ddr3_tip_configure_phy
 * configures phy and electrical parameters
 *
 * Broadcasts pad calibration values (drive strength and ODT) to all
 * data and control PHYs, then programs per-interface/per-octet Vref
 * and clamp settings from clamp_tbl[] and vref_init_val.
 *
 * returns: MV_OK on success, otherwise propagated via CHECK_STATUS()
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* ZRI calibration, data pads: P-drive in bits [13:7], N-drive in [6:0] */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		PAD_ZRI_CAL_PHY_REG,
		((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	/* ZRI calibration, control pads: same 7-bit field layout */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		PAD_ZRI_CAL_PHY_REG,
		((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	/* ODT calibration, data pads: P-ODT in bits [11:6], N-ODT in [5:0] */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		PAD_ODT_CAL_PHY_REG,
		((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	/* ODT calibration, control pads */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		PAD_ODT_CAL_PHY_REG,
		((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	/* clear pad pre-emphasis disable register (all pads enabled) --
	 * exact bit semantics defined by PAD_PRE_DISABLE_PHY_REG, verify
	 * against the PHY register spec
	 */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		PAD_PRE_DISABLE_PHY_REG, 0));
	/* reset CMOS pad configuration on data and control PHYs */
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
			phy_id < octets_per_if_num;
			phy_id++) {
				VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
				/* Vref & clamp: clamp in bits [6:4], Vref in [2:0] */
				CHECK_STATUS(ddr3_tip_bus_read_modify_write
					(dev_num, ACCESS_TYPE_UNICAST,
					if_id, phy_id, DDR_PHY_DATA,
					PAD_CFG_PHY_REG,
					((clamp_tbl[if_id] << 4) | vref_init_val),
					((0x7 << 4) | 0x7)));
				/* clamp not relevant for control */
				CHECK_STATUS(ddr3_tip_bus_read_modify_write
					(dev_num, ACCESS_TYPE_UNICAST,
					if_id, phy_id, DDR_PHY_CONTROL,
					PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	/* extra data-PHY setting (reg 0x90 = 0x6002) for positive-edge PHYs --
	 * register meaning not visible here; confirm against the PHY spec
	 */
	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
		MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
		(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		DDR_PHY_DATA, 0x90, 0x6002));


	return MV_OK;
}
1450 
1451 
/*
 * mv_ddr_manual_cal_do() - manual calibration hook
 *
 * Stub on this platform: no manual calibration is performed,
 * always returns 0 (success).
 */
int mv_ddr_manual_cal_do(void)
{
	return 0;
}
1456