xref: /openbmc/u-boot/drivers/ddr/fsl/options.c (revision 1a68faac)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
4  * Copyright 2017-2018 NXP Semiconductor
5  */
6 
7 #include <common.h>
8 #include <hwconfig.h>
9 #include <fsl_ddr_sdram.h>
10 
11 #include <fsl_ddr.h>
12 #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
13 	defined(CONFIG_ARM)
14 #include <asm/arch/clock.h>
15 #endif
16 
17 /*
18  * Use our own stack-based buffer before relocation so that longer hwconfig
19  * strings in the environment can be accessed before we've relocated.  This
20  * is fairly fragile with respect to both stack usage and buffer size, but
21  * env_get_f() will at least warn us about the latter.
22  */
23 
24 /* Board-specific functions defined in each board's ddr.c */
25 void __weak fsl_ddr_board_options(memctl_options_t *popts,
26 				  dimm_params_t *pdimm,
27 				  unsigned int ctrl_num)
28 {
29 	return;
30 }
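/*
 * A minimal sketch (illustrative only) of such a board override in a board's
 * ddr.c; the fields touched and the values used are assumptions, not taken
 * from any real board:
 *
 *	void fsl_ddr_board_options(memctl_options_t *popts,
 *				   dimm_params_t *pdimm,
 *				   unsigned int ctrl_num)
 *	{
 *		if (ctrl_num > 0)
 *			return;
 *		popts->clk_adjust = 4;		// assumed board-tuned value
 *		popts->write_data_delay = 2;	// assumed board-tuned value
 *	}
 */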
31 
32 struct dynamic_odt {
33 	unsigned int odt_rd_cfg;
34 	unsigned int odt_wr_cfg;
35 	unsigned int odt_rtt_norm;
36 	unsigned int odt_rtt_wr;
37 };
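/*
 * Each table below has one entry per chip select (cs0..cs3).  The table
 * names encode the DIMM population they describe: "single_"/"dual_" gives
 * the number of DIMM slots on the controller, and each following letter
 * gives one slot's rank count -- Q = quad-rank, D = dual-rank,
 * S = single-rank, 0 = empty slot.  For example, dual_DS is a two-slot
 * controller with a dual-rank DIMM in slot 0 and a single-rank DIMM in
 * slot 1 (see how populate_memctl_options() selects these tables below).
 */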
38 
39 #ifdef CONFIG_SYS_FSL_DDR4
40 /* Quad rank is not verified yet due to limited availability.
41  * 20 ohm is replaced with 34 ohm since DDR4 does not have a 20 ohm option.
42  */
43 static __maybe_unused const struct dynamic_odt single_Q[4] = {
44 	{	/* cs0 */
45 		FSL_DDR_ODT_NEVER,
46 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
47 		DDR4_RTT_34_OHM,	/* unverified */
48 		DDR4_RTT_120_OHM
49 	},
50 	{	/* cs1 */
51 		FSL_DDR_ODT_NEVER,
52 		FSL_DDR_ODT_NEVER,
53 		DDR4_RTT_OFF,
54 		DDR4_RTT_120_OHM
55 	},
56 	{	/* cs2 */
57 		FSL_DDR_ODT_NEVER,
58 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
59 		DDR4_RTT_34_OHM,
60 		DDR4_RTT_120_OHM
61 	},
62 	{	/* cs3 */
63 		FSL_DDR_ODT_NEVER,
64 		FSL_DDR_ODT_NEVER,	/* tied high */
65 		DDR4_RTT_OFF,
66 		DDR4_RTT_120_OHM
67 	}
68 };
69 
70 static __maybe_unused const struct dynamic_odt single_D[4] = {
71 	{	/* cs0 */
72 		FSL_DDR_ODT_NEVER,
73 		FSL_DDR_ODT_ALL,
74 		DDR4_RTT_40_OHM,
75 		DDR4_RTT_OFF
76 	},
77 	{	/* cs1 */
78 		FSL_DDR_ODT_NEVER,
79 		FSL_DDR_ODT_NEVER,
80 		DDR4_RTT_OFF,
81 		DDR4_RTT_OFF
82 	},
83 	{0, 0, 0, 0},
84 	{0, 0, 0, 0}
85 };
86 
87 static __maybe_unused const struct dynamic_odt single_S[4] = {
88 	{	/* cs0 */
89 		FSL_DDR_ODT_NEVER,
90 		FSL_DDR_ODT_ALL,
91 		DDR4_RTT_40_OHM,
92 		DDR4_RTT_OFF
93 	},
94 	{0, 0, 0, 0},
95 	{0, 0, 0, 0},
96 	{0, 0, 0, 0},
97 };
98 
99 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
100 	{	/* cs0 */
101 		FSL_DDR_ODT_NEVER,
102 		FSL_DDR_ODT_SAME_DIMM,
103 		DDR4_RTT_120_OHM,
104 		DDR4_RTT_OFF
105 	},
106 	{	/* cs1 */
107 		FSL_DDR_ODT_OTHER_DIMM,
108 		FSL_DDR_ODT_OTHER_DIMM,
109 		DDR4_RTT_34_OHM,
110 		DDR4_RTT_OFF
111 	},
112 	{	/* cs2 */
113 		FSL_DDR_ODT_NEVER,
114 		FSL_DDR_ODT_SAME_DIMM,
115 		DDR4_RTT_120_OHM,
116 		DDR4_RTT_OFF
117 	},
118 	{	/* cs3 */
119 		FSL_DDR_ODT_OTHER_DIMM,
120 		FSL_DDR_ODT_OTHER_DIMM,
121 		DDR4_RTT_34_OHM,
122 		DDR4_RTT_OFF
123 	}
124 };
125 
126 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
127 	{	/* cs0 */
128 		FSL_DDR_ODT_NEVER,
129 		FSL_DDR_ODT_SAME_DIMM,
130 		DDR4_RTT_120_OHM,
131 		DDR4_RTT_OFF
132 	},
133 	{	/* cs1 */
134 		FSL_DDR_ODT_OTHER_DIMM,
135 		FSL_DDR_ODT_OTHER_DIMM,
136 		DDR4_RTT_34_OHM,
137 		DDR4_RTT_OFF
138 	},
139 	{	/* cs2 */
140 		FSL_DDR_ODT_OTHER_DIMM,
141 		FSL_DDR_ODT_ALL,
142 		DDR4_RTT_34_OHM,
143 		DDR4_RTT_120_OHM
144 	},
145 	{0, 0, 0, 0}
146 };
147 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
148 	{	/* cs0 */
149 		FSL_DDR_ODT_OTHER_DIMM,
150 		FSL_DDR_ODT_ALL,
151 		DDR4_RTT_34_OHM,
152 		DDR4_RTT_120_OHM
153 	},
154 	{0, 0, 0, 0},
155 	{	/* cs2 */
156 		FSL_DDR_ODT_NEVER,
157 		FSL_DDR_ODT_SAME_DIMM,
158 		DDR4_RTT_120_OHM,
159 		DDR4_RTT_OFF
160 	},
161 	{	/* cs3 */
162 		FSL_DDR_ODT_OTHER_DIMM,
163 		FSL_DDR_ODT_OTHER_DIMM,
164 		DDR4_RTT_34_OHM,
165 		DDR4_RTT_OFF
166 	}
167 };
168 
169 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
170 	{	/* cs0 */
171 		FSL_DDR_ODT_OTHER_DIMM,
172 		FSL_DDR_ODT_ALL,
173 		DDR4_RTT_34_OHM,
174 		DDR4_RTT_120_OHM
175 	},
176 	{0, 0, 0, 0},
177 	{	/* cs2 */
178 		FSL_DDR_ODT_OTHER_DIMM,
179 		FSL_DDR_ODT_ALL,
180 		DDR4_RTT_34_OHM,
181 		DDR4_RTT_120_OHM
182 	},
183 	{0, 0, 0, 0}
184 };
185 
186 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
187 	{	/* cs0 */
188 		FSL_DDR_ODT_NEVER,
189 		FSL_DDR_ODT_SAME_DIMM,
190 		DDR4_RTT_40_OHM,
191 		DDR4_RTT_OFF
192 	},
193 	{	/* cs1 */
194 		FSL_DDR_ODT_NEVER,
195 		FSL_DDR_ODT_NEVER,
196 		DDR4_RTT_OFF,
197 		DDR4_RTT_OFF
198 	},
199 	{0, 0, 0, 0},
200 	{0, 0, 0, 0}
201 };
202 
203 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
204 	{0, 0, 0, 0},
205 	{0, 0, 0, 0},
206 	{	/* cs2 */
207 		FSL_DDR_ODT_NEVER,
208 		FSL_DDR_ODT_SAME_DIMM,
209 		DDR4_RTT_40_OHM,
210 		DDR4_RTT_OFF
211 	},
212 	{	/* cs3 */
213 		FSL_DDR_ODT_NEVER,
214 		FSL_DDR_ODT_NEVER,
215 		DDR4_RTT_OFF,
216 		DDR4_RTT_OFF
217 	}
218 };
219 
220 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
221 	{	/* cs0 */
222 		FSL_DDR_ODT_NEVER,
223 		FSL_DDR_ODT_CS,
224 		DDR4_RTT_40_OHM,
225 		DDR4_RTT_OFF
226 	},
227 	{0, 0, 0, 0},
228 	{0, 0, 0, 0},
229 	{0, 0, 0, 0}
230 
231 };
232 
233 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
234 	{0, 0, 0, 0},
235 	{0, 0, 0, 0},
236 	{	/* cs2 */
237 		FSL_DDR_ODT_NEVER,
238 		FSL_DDR_ODT_CS,
239 		DDR4_RTT_40_OHM,
240 		DDR4_RTT_OFF
241 	},
242 	{0, 0, 0, 0}
243 
244 };
245 
246 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
247 	{	/* cs0 */
248 		FSL_DDR_ODT_NEVER,
249 		FSL_DDR_ODT_CS,
250 		DDR4_RTT_120_OHM,
251 		DDR4_RTT_OFF
252 	},
253 	{	/* cs1 */
254 		FSL_DDR_ODT_NEVER,
255 		FSL_DDR_ODT_CS,
256 		DDR4_RTT_120_OHM,
257 		DDR4_RTT_OFF
258 	},
259 	{	/* cs2 */
260 		FSL_DDR_ODT_NEVER,
261 		FSL_DDR_ODT_CS,
262 		DDR4_RTT_120_OHM,
263 		DDR4_RTT_OFF
264 	},
265 	{	/* cs3 */
266 		FSL_DDR_ODT_NEVER,
267 		FSL_DDR_ODT_CS,
268 		DDR4_RTT_120_OHM,
269 		DDR4_RTT_OFF
270 	}
271 };
272 #elif defined(CONFIG_SYS_FSL_DDR3)
273 static __maybe_unused const struct dynamic_odt single_Q[4] = {
274 	{	/* cs0 */
275 		FSL_DDR_ODT_NEVER,
276 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
277 		DDR3_RTT_20_OHM,
278 		DDR3_RTT_120_OHM
279 	},
280 	{	/* cs1 */
281 		FSL_DDR_ODT_NEVER,
282 		FSL_DDR_ODT_NEVER,	/* tied high */
283 		DDR3_RTT_OFF,
284 		DDR3_RTT_120_OHM
285 	},
286 	{	/* cs2 */
287 		FSL_DDR_ODT_NEVER,
288 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
289 		DDR3_RTT_20_OHM,
290 		DDR3_RTT_120_OHM
291 	},
292 	{	/* cs3 */
293 		FSL_DDR_ODT_NEVER,
294 		FSL_DDR_ODT_NEVER,	/* tied high */
295 		DDR3_RTT_OFF,
296 		DDR3_RTT_120_OHM
297 	}
298 };
299 
300 static __maybe_unused const struct dynamic_odt single_D[4] = {
301 	{	/* cs0 */
302 		FSL_DDR_ODT_NEVER,
303 		FSL_DDR_ODT_ALL,
304 		DDR3_RTT_40_OHM,
305 		DDR3_RTT_OFF
306 	},
307 	{	/* cs1 */
308 		FSL_DDR_ODT_NEVER,
309 		FSL_DDR_ODT_NEVER,
310 		DDR3_RTT_OFF,
311 		DDR3_RTT_OFF
312 	},
313 	{0, 0, 0, 0},
314 	{0, 0, 0, 0}
315 };
316 
317 static __maybe_unused const struct dynamic_odt single_S[4] = {
318 	{	/* cs0 */
319 		FSL_DDR_ODT_NEVER,
320 		FSL_DDR_ODT_ALL,
321 		DDR3_RTT_40_OHM,
322 		DDR3_RTT_OFF
323 	},
324 	{0, 0, 0, 0},
325 	{0, 0, 0, 0},
326 	{0, 0, 0, 0},
327 };
328 
329 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
330 	{	/* cs0 */
331 		FSL_DDR_ODT_NEVER,
332 		FSL_DDR_ODT_SAME_DIMM,
333 		DDR3_RTT_120_OHM,
334 		DDR3_RTT_OFF
335 	},
336 	{	/* cs1 */
337 		FSL_DDR_ODT_OTHER_DIMM,
338 		FSL_DDR_ODT_OTHER_DIMM,
339 		DDR3_RTT_30_OHM,
340 		DDR3_RTT_OFF
341 	},
342 	{	/* cs2 */
343 		FSL_DDR_ODT_NEVER,
344 		FSL_DDR_ODT_SAME_DIMM,
345 		DDR3_RTT_120_OHM,
346 		DDR3_RTT_OFF
347 	},
348 	{	/* cs3 */
349 		FSL_DDR_ODT_OTHER_DIMM,
350 		FSL_DDR_ODT_OTHER_DIMM,
351 		DDR3_RTT_30_OHM,
352 		DDR3_RTT_OFF
353 	}
354 };
355 
356 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
357 	{	/* cs0 */
358 		FSL_DDR_ODT_NEVER,
359 		FSL_DDR_ODT_SAME_DIMM,
360 		DDR3_RTT_120_OHM,
361 		DDR3_RTT_OFF
362 	},
363 	{	/* cs1 */
364 		FSL_DDR_ODT_OTHER_DIMM,
365 		FSL_DDR_ODT_OTHER_DIMM,
366 		DDR3_RTT_30_OHM,
367 		DDR3_RTT_OFF
368 	},
369 	{	/* cs2 */
370 		FSL_DDR_ODT_OTHER_DIMM,
371 		FSL_DDR_ODT_ALL,
372 		DDR3_RTT_20_OHM,
373 		DDR3_RTT_120_OHM
374 	},
375 	{0, 0, 0, 0}
376 };
377 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
378 	{	/* cs0 */
379 		FSL_DDR_ODT_OTHER_DIMM,
380 		FSL_DDR_ODT_ALL,
381 		DDR3_RTT_20_OHM,
382 		DDR3_RTT_120_OHM
383 	},
384 	{0, 0, 0, 0},
385 	{	/* cs2 */
386 		FSL_DDR_ODT_NEVER,
387 		FSL_DDR_ODT_SAME_DIMM,
388 		DDR3_RTT_120_OHM,
389 		DDR3_RTT_OFF
390 	},
391 	{	/* cs3 */
392 		FSL_DDR_ODT_OTHER_DIMM,
393 		FSL_DDR_ODT_OTHER_DIMM,
394 		DDR3_RTT_20_OHM,
395 		DDR3_RTT_OFF
396 	}
397 };
398 
399 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
400 	{	/* cs0 */
401 		FSL_DDR_ODT_OTHER_DIMM,
402 		FSL_DDR_ODT_ALL,
403 		DDR3_RTT_30_OHM,
404 		DDR3_RTT_120_OHM
405 	},
406 	{0, 0, 0, 0},
407 	{	/* cs2 */
408 		FSL_DDR_ODT_OTHER_DIMM,
409 		FSL_DDR_ODT_ALL,
410 		DDR3_RTT_30_OHM,
411 		DDR3_RTT_120_OHM
412 	},
413 	{0, 0, 0, 0}
414 };
415 
416 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
417 	{	/* cs0 */
418 		FSL_DDR_ODT_NEVER,
419 		FSL_DDR_ODT_SAME_DIMM,
420 		DDR3_RTT_40_OHM,
421 		DDR3_RTT_OFF
422 	},
423 	{	/* cs1 */
424 		FSL_DDR_ODT_NEVER,
425 		FSL_DDR_ODT_NEVER,
426 		DDR3_RTT_OFF,
427 		DDR3_RTT_OFF
428 	},
429 	{0, 0, 0, 0},
430 	{0, 0, 0, 0}
431 };
432 
433 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
434 	{0, 0, 0, 0},
435 	{0, 0, 0, 0},
436 	{	/* cs2 */
437 		FSL_DDR_ODT_NEVER,
438 		FSL_DDR_ODT_SAME_DIMM,
439 		DDR3_RTT_40_OHM,
440 		DDR3_RTT_OFF
441 	},
442 	{	/* cs3 */
443 		FSL_DDR_ODT_NEVER,
444 		FSL_DDR_ODT_NEVER,
445 		DDR3_RTT_OFF,
446 		DDR3_RTT_OFF
447 	}
448 };
449 
450 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
451 	{	/* cs0 */
452 		FSL_DDR_ODT_NEVER,
453 		FSL_DDR_ODT_CS,
454 		DDR3_RTT_40_OHM,
455 		DDR3_RTT_OFF
456 	},
457 	{0, 0, 0, 0},
458 	{0, 0, 0, 0},
459 	{0, 0, 0, 0}
460 
461 };
462 
463 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
464 	{0, 0, 0, 0},
465 	{0, 0, 0, 0},
466 	{	/* cs2 */
467 		FSL_DDR_ODT_NEVER,
468 		FSL_DDR_ODT_CS,
469 		DDR3_RTT_40_OHM,
470 		DDR3_RTT_OFF
471 	},
472 	{0, 0, 0, 0}
473 
474 };
475 
476 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
477 	{	/* cs0 */
478 		FSL_DDR_ODT_NEVER,
479 		FSL_DDR_ODT_CS,
480 		DDR3_RTT_120_OHM,
481 		DDR3_RTT_OFF
482 	},
483 	{	/* cs1 */
484 		FSL_DDR_ODT_NEVER,
485 		FSL_DDR_ODT_CS,
486 		DDR3_RTT_120_OHM,
487 		DDR3_RTT_OFF
488 	},
489 	{	/* cs2 */
490 		FSL_DDR_ODT_NEVER,
491 		FSL_DDR_ODT_CS,
492 		DDR3_RTT_120_OHM,
493 		DDR3_RTT_OFF
494 	},
495 	{	/* cs3 */
496 		FSL_DDR_ODT_NEVER,
497 		FSL_DDR_ODT_CS,
498 		DDR3_RTT_120_OHM,
499 		DDR3_RTT_OFF
500 	}
501 };
502 #else	/* CONFIG_SYS_FSL_DDR3 */
503 static __maybe_unused const struct dynamic_odt single_Q[4] = {
504 	{0, 0, 0, 0},
505 	{0, 0, 0, 0},
506 	{0, 0, 0, 0},
507 	{0, 0, 0, 0}
508 };
509 
510 static __maybe_unused const struct dynamic_odt single_D[4] = {
511 	{	/* cs0 */
512 		FSL_DDR_ODT_NEVER,
513 		FSL_DDR_ODT_ALL,
514 		DDR2_RTT_150_OHM,
515 		DDR2_RTT_OFF
516 	},
517 	{	/* cs1 */
518 		FSL_DDR_ODT_NEVER,
519 		FSL_DDR_ODT_NEVER,
520 		DDR2_RTT_OFF,
521 		DDR2_RTT_OFF
522 	},
523 	{0, 0, 0, 0},
524 	{0, 0, 0, 0}
525 };
526 
527 static __maybe_unused const struct dynamic_odt single_S[4] = {
528 	{	/* cs0 */
529 		FSL_DDR_ODT_NEVER,
530 		FSL_DDR_ODT_ALL,
531 		DDR2_RTT_150_OHM,
532 		DDR2_RTT_OFF
533 	},
534 	{0, 0, 0, 0},
535 	{0, 0, 0, 0},
536 	{0, 0, 0, 0},
537 };
538 
539 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
540 	{	/* cs0 */
541 		FSL_DDR_ODT_OTHER_DIMM,
542 		FSL_DDR_ODT_OTHER_DIMM,
543 		DDR2_RTT_75_OHM,
544 		DDR2_RTT_OFF
545 	},
546 	{	/* cs1 */
547 		FSL_DDR_ODT_NEVER,
548 		FSL_DDR_ODT_NEVER,
549 		DDR2_RTT_OFF,
550 		DDR2_RTT_OFF
551 	},
552 	{	/* cs2 */
553 		FSL_DDR_ODT_OTHER_DIMM,
554 		FSL_DDR_ODT_OTHER_DIMM,
555 		DDR2_RTT_75_OHM,
556 		DDR2_RTT_OFF
557 	},
558 	{	/* cs3 */
559 		FSL_DDR_ODT_NEVER,
560 		FSL_DDR_ODT_NEVER,
561 		DDR2_RTT_OFF,
562 		DDR2_RTT_OFF
563 	}
564 };
565 
566 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
567 	{	/* cs0 */
568 		FSL_DDR_ODT_OTHER_DIMM,
569 		FSL_DDR_ODT_OTHER_DIMM,
570 		DDR2_RTT_75_OHM,
571 		DDR2_RTT_OFF
572 	},
573 	{	/* cs1 */
574 		FSL_DDR_ODT_NEVER,
575 		FSL_DDR_ODT_NEVER,
576 		DDR2_RTT_OFF,
577 		DDR2_RTT_OFF
578 	},
579 	{	/* cs2 */
580 		FSL_DDR_ODT_OTHER_DIMM,
581 		FSL_DDR_ODT_OTHER_DIMM,
582 		DDR2_RTT_75_OHM,
583 		DDR2_RTT_OFF
584 	},
585 	{0, 0, 0, 0}
586 };
587 
588 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
589 	{	/* cs0 */
590 		FSL_DDR_ODT_OTHER_DIMM,
591 		FSL_DDR_ODT_OTHER_DIMM,
592 		DDR2_RTT_75_OHM,
593 		DDR2_RTT_OFF
594 	},
595 	{0, 0, 0, 0},
596 	{	/* cs2 */
597 		FSL_DDR_ODT_OTHER_DIMM,
598 		FSL_DDR_ODT_OTHER_DIMM,
599 		DDR2_RTT_75_OHM,
600 		DDR2_RTT_OFF
601 	},
602 	{	/* cs3 */
603 		FSL_DDR_ODT_NEVER,
604 		FSL_DDR_ODT_NEVER,
605 		DDR2_RTT_OFF,
606 		DDR2_RTT_OFF
607 	}
608 };
609 
610 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
611 	{	/* cs0 */
612 		FSL_DDR_ODT_OTHER_DIMM,
613 		FSL_DDR_ODT_OTHER_DIMM,
614 		DDR2_RTT_75_OHM,
615 		DDR2_RTT_OFF
616 	},
617 	{0, 0, 0, 0},
618 	{	/* cs2 */
619 		FSL_DDR_ODT_OTHER_DIMM,
620 		FSL_DDR_ODT_OTHER_DIMM,
621 		DDR2_RTT_75_OHM,
622 		DDR2_RTT_OFF
623 	},
624 	{0, 0, 0, 0}
625 };
626 
627 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
628 	{	/* cs0 */
629 		FSL_DDR_ODT_NEVER,
630 		FSL_DDR_ODT_ALL,
631 		DDR2_RTT_150_OHM,
632 		DDR2_RTT_OFF
633 	},
634 	{	/* cs1 */
635 		FSL_DDR_ODT_NEVER,
636 		FSL_DDR_ODT_NEVER,
637 		DDR2_RTT_OFF,
638 		DDR2_RTT_OFF
639 	},
640 	{0, 0, 0, 0},
641 	{0, 0, 0, 0}
642 };
643 
644 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
645 	{0, 0, 0, 0},
646 	{0, 0, 0, 0},
647 	{	/* cs2 */
648 		FSL_DDR_ODT_NEVER,
649 		FSL_DDR_ODT_ALL,
650 		DDR2_RTT_150_OHM,
651 		DDR2_RTT_OFF
652 	},
653 	{	/* cs3 */
654 		FSL_DDR_ODT_NEVER,
655 		FSL_DDR_ODT_NEVER,
656 		DDR2_RTT_OFF,
657 		DDR2_RTT_OFF
658 	}
659 };
660 
661 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
662 	{	/* cs0 */
663 		FSL_DDR_ODT_NEVER,
664 		FSL_DDR_ODT_CS,
665 		DDR2_RTT_150_OHM,
666 		DDR2_RTT_OFF
667 	},
668 	{0, 0, 0, 0},
669 	{0, 0, 0, 0},
670 	{0, 0, 0, 0}
671 
672 };
673 
674 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
675 	{0, 0, 0, 0},
676 	{0, 0, 0, 0},
677 	{	/* cs2 */
678 		FSL_DDR_ODT_NEVER,
679 		FSL_DDR_ODT_CS,
680 		DDR2_RTT_150_OHM,
681 		DDR2_RTT_OFF
682 	},
683 	{0, 0, 0, 0}
684 
685 };
686 
687 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
688 	{	/* cs0 */
689 		FSL_DDR_ODT_NEVER,
690 		FSL_DDR_ODT_CS,
691 		DDR2_RTT_75_OHM,
692 		DDR2_RTT_OFF
693 	},
694 	{	/* cs1 */
695 		FSL_DDR_ODT_NEVER,
696 		FSL_DDR_ODT_NEVER,
697 		DDR2_RTT_OFF,
698 		DDR2_RTT_OFF
699 	},
700 	{	/* cs2 */
701 		FSL_DDR_ODT_NEVER,
702 		FSL_DDR_ODT_CS,
703 		DDR2_RTT_75_OHM,
704 		DDR2_RTT_OFF
705 	},
706 	{	/* cs3 */
707 		FSL_DDR_ODT_NEVER,
708 		FSL_DDR_ODT_NEVER,
709 		DDR2_RTT_OFF,
710 		DDR2_RTT_OFF
711 	}
712 };
713 #endif
714 
715 /*
716  * Automatically select the bank (chip-select) interleaving mode based on
717  * the installed DIMMs, in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
718  * This function only deals with one or two slots per controller.
719  */
720 static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
721 {
722 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
723 	if (pdimm[0].n_ranks == 4)
724 		return FSL_DDR_CS0_CS1_CS2_CS3;
725 	else if (pdimm[0].n_ranks == 2)
726 		return FSL_DDR_CS0_CS1;
727 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
728 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
729 	if (pdimm[0].n_ranks == 4)
730 		return FSL_DDR_CS0_CS1_CS2_CS3;
731 #endif
732 	if (pdimm[0].n_ranks == 2) {
733 		if (pdimm[1].n_ranks == 2)
734 			return FSL_DDR_CS0_CS1_CS2_CS3;
735 		else
736 			return FSL_DDR_CS0_CS1;
737 	}
738 #endif
739 	return 0;
740 }
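/*
 * auto_bank_intlv() is used by populate_memctl_options() below when hwconfig
 * selects "fsl_ddr:bank_intlv=auto"; it returns the widest chip-select
 * interleaving the installed ranks support, or 0 for none.
 */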
741 
742 unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
743 			memctl_options_t *popts,
744 			dimm_params_t *pdimm,
745 			unsigned int ctrl_num)
746 {
747 	unsigned int i;
748 	char buf[HWCONFIG_BUFFER_SIZE];
749 #if defined(CONFIG_SYS_FSL_DDR3) || \
750 	defined(CONFIG_SYS_FSL_DDR2) || \
751 	defined(CONFIG_SYS_FSL_DDR4)
752 	const struct dynamic_odt *pdodt = odt_unknown;
753 #endif
754 #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
755 	ulong ddr_freq;
756 #endif
757 
758 	/*
759 	 * Extract hwconfig from the environment, since the environment has not
760 	 * been fully set up yet but we need it for the DDR config parameters.
761 	 */
762 	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
763 		buf[0] = '\0';
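	/*
	 * Illustrative example (not from any real board): the options parsed
	 * below typically come from an environment setting such as
	 *
	 *	hwconfig=fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1,ecc=on
	 *
	 * See doc/README.fsl-ddr for the full list of sub-options.
	 */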
764 
765 #if defined(CONFIG_SYS_FSL_DDR3) || \
766 	defined(CONFIG_SYS_FSL_DDR2) || \
767 	defined(CONFIG_SYS_FSL_DDR4)
768 	/* Chip select options. */
769 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
770 	switch (pdimm[0].n_ranks) {
771 	case 1:
772 		pdodt = single_S;
773 		break;
774 	case 2:
775 		pdodt = single_D;
776 		break;
777 	case 4:
778 		pdodt = single_Q;
779 		break;
780 	}
781 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
782 	switch (pdimm[0].n_ranks) {
783 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
784 	case 4:
785 		pdodt = single_Q;
786 		if (pdimm[1].n_ranks)
787 			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
788 		break;
789 #endif
790 	case 2:
791 		switch (pdimm[1].n_ranks) {
792 		case 2:
793 			pdodt = dual_DD;
794 			break;
795 		case 1:
796 			pdodt = dual_DS;
797 			break;
798 		case 0:
799 			pdodt = dual_D0;
800 			break;
801 		}
802 		break;
803 	case 1:
804 		switch (pdimm[1].n_ranks) {
805 		case 2:
806 			pdodt = dual_SD;
807 			break;
808 		case 1:
809 			pdodt = dual_SS;
810 			break;
811 		case 0:
812 			pdodt = dual_S0;
813 			break;
814 		}
815 		break;
816 	case 0:
817 		switch (pdimm[1].n_ranks) {
818 		case 2:
819 			pdodt = dual_0D;
820 			break;
821 		case 1:
822 			pdodt = dual_0S;
823 			break;
824 		}
825 		break;
826 	}
827 #endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
828 #endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */
829 
830 	/* Pick chip-select local options. */
831 	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
832 #if defined(CONFIG_SYS_FSL_DDR3) || \
833 	defined(CONFIG_SYS_FSL_DDR2) || \
834 	defined(CONFIG_SYS_FSL_DDR4)
835 		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
836 		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
837 		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
838 		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
839 #else
840 		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
841 		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
842 #endif
843 		popts->cs_local_opts[i].auto_precharge = 0;
844 	}
845 
846 	/* Pick interleaving mode. */
847 
848 	/*
849 	 * 0 = no interleaving
850 	 * 1 = interleaving between 2 controllers
851 	 */
852 	popts->memctl_interleaving = 0;
853 
854 	/*
855 	 * 0 = cacheline
856 	 * 1 = page
857 	 * 2 = (logical) bank
858 	 * 3 = superbank (only if CS interleaving is enabled)
859 	 */
860 	popts->memctl_interleaving_mode = 0;
861 
862 	/*
863 	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
864 	 * 1: page:      bit to the left of the column bits selects the memctl
865 	 * 2: bank:      bit to the left of the bank bits selects the memctl
866 	 * 3: superbank: bit to the left of the chip select selects the memctl
867 	 *
868 	 * NOTE: ba_intlv (rank interleaving) is independent of memory
869 	 * controller interleaving; it is only within a memory controller.
870 	 * Must use superbank interleaving if rank interleaving is used and
871 	 * memory controller interleaving is enabled.
872 	 */
873 
874 	/*
875 	 * 0 = no
876 	 * 0x40 = CS0,CS1
877 	 * 0x20 = CS2,CS3
878 	 * 0x60 = CS0,CS1 + CS2,CS3
879 	 * 0x04 = CS0,CS1,CS2,CS3
880 	 */
881 	popts->ba_intlv_ctl = 0;
882 
883 	/* Memory Organization Parameters */
884 	popts->registered_dimm_en = common_dimm->all_dimms_registered;
885 
886 	/* Operational Mode Parameters */
887 
888 	/* Pick ECC modes */
889 	popts->ecc_mode = 0;		  /* 0 = disabled, 1 = enabled */
890 #ifdef CONFIG_DDR_ECC
891 	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
892 		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
893 			popts->ecc_mode = 1;
894 	} else
895 		popts->ecc_mode = 1;
896 #endif
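	/*
	 * With CONFIG_DDR_ECC, ECC defaults to enabled above; it is only
	 * disabled when hwconfig carries an explicit fsl_ddr "ecc" sub-option
	 * other than "on", e.g. "hwconfig=fsl_ddr:ecc=off" (illustrative).
	 */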
897 	/* 1 = use the memory controller to initialize data */
898 	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
899 
900 	/*
901 	 * Choose DQS config
902 	 * 0 for DDR1
903 	 * 1 for DDR2 and DDR3
904 	 */
905 #if defined(CONFIG_SYS_FSL_DDR1)
906 	popts->dqs_config = 0;
907 #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
908 	popts->dqs_config = 1;
909 #endif
910 
911 	/* Choose self-refresh during sleep. */
912 	popts->self_refresh_in_sleep = 1;
913 
914 	/* Choose dynamic power management mode. */
915 	popts->dynamic_power = 0;
916 
917 	/*
918 	 * Check the first DIMM for the primary SDRAM width, presuming
919 	 * all DIMMs are similar:
920 	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
921 	 */
922 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
923 	if (pdimm[0].n_ranks != 0) {
924 		if ((pdimm[0].data_width >= 64) &&
925 			(pdimm[0].data_width <= 72))
926 			popts->data_bus_width = 0;
927 		else if ((pdimm[0].data_width >= 32) &&
928 			(pdimm[0].data_width <= 40))
929 			popts->data_bus_width = 1;
930 		else {
931 			panic("Error: data width %u is invalid!\n",
932 				pdimm[0].data_width);
933 		}
934 	}
935 #else
936 	if (pdimm[0].n_ranks != 0) {
937 		if (pdimm[0].primary_sdram_width == 64)
938 			popts->data_bus_width = 0;
939 		else if (pdimm[0].primary_sdram_width == 32)
940 			popts->data_bus_width = 1;
941 		else if (pdimm[0].primary_sdram_width == 16)
942 			popts->data_bus_width = 2;
943 		else {
944 			panic("Error: primary sdram width %u is invalid!\n",
945 				pdimm[0].primary_sdram_width);
946 		}
947 	}
948 #endif
949 
950 	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
951 
952 	/* Choose burst length. */
953 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
954 #if defined(CONFIG_E500MC)
955 	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
956 	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
957 #else
958 	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
959 		/* 32-bit or 16-bit bus */
960 		popts->otf_burst_chop_en = 0;
961 		popts->burst_length = DDR_BL8;
962 	} else {
963 		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
964 		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
965 	}
966 #endif
967 #else
968 	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
969 #endif
970 
971 	/* Choose ddr controller address mirror mode */
972 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
973 	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
974 		if (pdimm[i].n_ranks) {
975 			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
976 			break;
977 		}
978 	}
979 #endif
980 
981 	/* Global Timing Parameters. */
982 	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
983 
984 	/* Pick a caslat override. */
985 	popts->cas_latency_override = 0;
986 	popts->cas_latency_override_value = 3;
987 	if (popts->cas_latency_override) {
988 		debug("using caslat override value = %u\n",
989 		       popts->cas_latency_override_value);
990 	}
991 
992 	/* Decide whether to use the computed derated latency */
993 	popts->use_derated_caslat = 0;
994 
995 	/* Choose an additive latency. */
996 	popts->additive_latency_override = 0;
997 	popts->additive_latency_override_value = 3;
998 	if (popts->additive_latency_override) {
999 		debug("using additive latency override value = %u\n",
1000 		       popts->additive_latency_override_value);
1001 	}
1002 
1003 	/*
1004 	 * 2T_EN setting
1005 	 *
1006 	 * Factors to consider for 2T_EN:
1007 	 *	- number of DIMMs installed
1008 	 *	- number of components, number of active ranks
1009 	 *	- how much time you want to spend playing around
1010 	 */
1011 	popts->twot_en = 0;
1012 	popts->threet_en = 0;
1013 
1014 	/* Address parity enable, for RDIMM and DDR4 UDIMM/discrete memory */
1015 	if (popts->registered_dimm_en)
1016 		popts->ap_en = 1; /* 0 = disable,  1 = enable */
1017 	else
1018 		popts->ap_en = 0; /* disabled by default for DDR4 UDIMM/discrete */
1019 
1020 	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
1021 		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
1022 			if (popts->registered_dimm_en ||
1023 			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
1024 				popts->ap_en = 1;
1025 		}
1026 	}
1027 
1028 	/*
1029 	 * BSTTOPRE precharge interval
1030 	 *
1031 	 * Set this to 0 for global auto precharge
1032 	 * A value of 0x100 has been used for DDR1, DDR2 and DDR3, and is not
1033 	 * wrong; any value is acceptable, and the resulting performance
1034 	 * depends on the application, so there is no single best value.
1035 	 * One reasonable choice is 1/4 of the refresh interval (refint).
1036 	 */
1037 	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
1038 			 >> 2;
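	/*
	 * Worked example with assumed numbers: for a 7.8 us refresh interval
	 * and a 1250 ps memory clock (DDR3-1600), the calculation above gives
	 * 7800000 / 1250 = 6240 clocks, and 6240 >> 2 = 1560 (0x618).
	 */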
1039 
1040 	/*
1041 	 * Window for four activates -- tFAW
1042 	 *
1043 	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
1044 	 * FIXME: varies depending upon number of column addresses or data
1045 	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
1046 	 */
1047 #if defined(CONFIG_SYS_FSL_DDR1)
1048 	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
1049 
1050 #elif defined(CONFIG_SYS_FSL_DDR2)
1051 	/*
1052 	 * x4/x8;  some datasheets have 35000
1053 	 * x16 wide columns only?  Use 50000?
1054 	 */
1055 	popts->tfaw_window_four_activates_ps = 37500;
1056 
1057 #else
1058 	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
1059 #endif
1060 	popts->zq_en = 0;
1061 	popts->wrlvl_en = 0;
1062 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
1063 	/*
1064 	 * Because DDR3/DDR4 DIMMs use a fly-by topology, enabling write
1065 	 * leveling is recommended to meet tDQSS under different loading
1066 	 * conditions.
1067 	 */
1068 	popts->wrlvl_en = 1;
1069 	popts->zq_en = 1;
1070 	popts->wrlvl_override = 0;
1071 #endif
1072 
1073 	/*
1074 	 * Check interleaving configuration from environment.
1075 	 * Please refer to doc/README.fsl-ddr for details.
1076 	 *
1077 	 * If memory controller interleaving is enabled, then the data
1078 	 * bus widths must be programmed identically for all memory controllers.
1079 	 *
1080 	 * Attempt to set all controllers to the same chip-select
1081 	 * interleaving mode.  A best effort is made to interleave the
1082 	 * requested ranks together, so the result should be a subset of
1083 	 * the requested configuration.
1084 	 *
1085 	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
1086 	 * is mandatory and always enabled.
1087 	 */
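	/*
	 * The ctlr_intlv values recognized below are: null, cacheline, page,
	 * bank, superbank and, depending on the number of controllers,
	 * 3way_1KB/4KB/8KB or 4way_1KB/4KB/8KB, e.g.
	 * "hwconfig=fsl_ddr:ctlr_intlv=bank" (illustrative example).
	 */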
1088 #if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
1089 	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
1090 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1091 		;
1092 #else
1093 		goto done;
1094 #endif
1095 	if (pdimm[0].n_ranks == 0) {
1096 		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
1097 		popts->memctl_interleaving = 0;
1098 		goto done;
1099 	}
1100 	popts->memctl_interleaving = 1;
1101 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1102 	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
1103 	popts->memctl_interleaving = 1;
1104 	debug("256 Byte interleaving\n");
1105 #else
1106 	/*
1107 	 * Test for "null" first; if CONFIG_HWCONFIG is not defined,
1108 	 * hwconfig_subarg_cmp_f() returns non-zero.
1109 	 */
1110 	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
1111 				    "null", buf)) {
1112 		popts->memctl_interleaving = 0;
1113 		debug("memory controller interleaving disabled.\n");
1114 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1115 					"ctlr_intlv",
1116 					"cacheline", buf)) {
1117 		popts->memctl_interleaving_mode =
1118 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1119 			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
1120 		popts->memctl_interleaving =
1121 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1122 			0 : 1;
1123 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1124 					"ctlr_intlv",
1125 					"page", buf)) {
1126 		popts->memctl_interleaving_mode =
1127 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1128 			0 : FSL_DDR_PAGE_INTERLEAVING;
1129 		popts->memctl_interleaving =
1130 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1131 			0 : 1;
1132 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1133 					"ctlr_intlv",
1134 					"bank", buf)) {
1135 		popts->memctl_interleaving_mode =
1136 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1137 			0 : FSL_DDR_BANK_INTERLEAVING;
1138 		popts->memctl_interleaving =
1139 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1140 			0 : 1;
1141 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1142 					"ctlr_intlv",
1143 					"superbank", buf)) {
1144 		popts->memctl_interleaving_mode =
1145 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1146 			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
1147 		popts->memctl_interleaving =
1148 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1149 			0 : 1;
1150 #if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
1151 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1152 					"ctlr_intlv",
1153 					"3way_1KB", buf)) {
1154 		popts->memctl_interleaving_mode =
1155 			FSL_DDR_3WAY_1KB_INTERLEAVING;
1156 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1157 					"ctlr_intlv",
1158 					"3way_4KB", buf)) {
1159 		popts->memctl_interleaving_mode =
1160 			FSL_DDR_3WAY_4KB_INTERLEAVING;
1161 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1162 					"ctlr_intlv",
1163 					"3way_8KB", buf)) {
1164 		popts->memctl_interleaving_mode =
1165 			FSL_DDR_3WAY_8KB_INTERLEAVING;
1166 #elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
1167 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1168 					"ctlr_intlv",
1169 					"4way_1KB", buf)) {
1170 		popts->memctl_interleaving_mode =
1171 			FSL_DDR_4WAY_1KB_INTERLEAVING;
1172 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1173 					"ctlr_intlv",
1174 					"4way_4KB", buf)) {
1175 		popts->memctl_interleaving_mode =
1176 			FSL_DDR_4WAY_4KB_INTERLEAVING;
1177 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1178 					"ctlr_intlv",
1179 					"4way_8KB", buf)) {
1180 		popts->memctl_interleaving_mode =
1181 			FSL_DDR_4WAY_8KB_INTERLEAVING;
1182 #endif
1183 	} else {
1184 		popts->memctl_interleaving = 0;
1185 		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
1186 	}
1187 #endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
1188 done:
1189 #endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
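	/*
	 * The bank_intlv values recognized below are: null, cs0_cs1, cs2_cs3,
	 * cs0_cs1_and_cs2_cs3, cs0_cs1_cs2_cs3 and auto, e.g.
	 * "hwconfig=fsl_ddr:bank_intlv=auto" (illustrative example).
	 */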
1190 	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
1191 		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
1192 		/* Test for "null" first; if CONFIG_HWCONFIG is not defined,
1193 		 * hwconfig_subarg_cmp_f() returns non-zero. */
1194 		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1195 					    "null", buf))
1196 			debug("bank interleaving disabled.\n");
1197 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1198 						 "cs0_cs1", buf))
1199 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
1200 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1201 						 "cs2_cs3", buf))
1202 			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
1203 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1204 						 "cs0_cs1_and_cs2_cs3", buf))
1205 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
1206 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1207 						 "cs0_cs1_cs2_cs3", buf))
1208 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
1209 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1210 						"auto", buf))
1211 			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
1212 		else
1213 			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
1214 		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
1215 		case FSL_DDR_CS0_CS1_CS2_CS3:
1216 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1217 			if (pdimm[0].n_ranks < 4) {
1218 				popts->ba_intlv_ctl = 0;
1219 				printf("Not enough bank(chip-select) for "
1220 					"CS0+CS1+CS2+CS3 on controller %d, "
1221 					"interleaving disabled!\n", ctrl_num);
1222 			}
1223 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1224 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
1225 			if (pdimm[0].n_ranks == 4)
1226 				break;
1227 #endif
1228 			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
1229 				popts->ba_intlv_ctl = 0;
1230 				printf("Not enough bank(chip-select) for "
1231 					"CS0+CS1+CS2+CS3 on controller %d, "
1232 					"interleaving disabled!\n", ctrl_num);
1233 			}
1234 			if (pdimm[0].capacity != pdimm[1].capacity) {
1235 				popts->ba_intlv_ctl = 0;
1236 				printf("Not identical DIMM size for "
1237 					"CS0+CS1+CS2+CS3 on controller %d, "
1238 					"interleaving disabled!\n", ctrl_num);
1239 			}
1240 #endif
1241 			break;
1242 		case FSL_DDR_CS0_CS1:
1243 			if (pdimm[0].n_ranks < 2) {
1244 				popts->ba_intlv_ctl = 0;
1245 				printf("Not enough bank(chip-select) for "
1246 					"CS0+CS1 on controller %d, "
1247 					"interleaving disabled!\n", ctrl_num);
1248 			}
1249 			break;
1250 		case FSL_DDR_CS2_CS3:
1251 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1252 			if (pdimm[0].n_ranks < 4) {
1253 				popts->ba_intlv_ctl = 0;
1254 				printf("Not enough bank(chip-select) for CS2+CS3 "
1255 					"on controller %d, interleaving disabled!\n", ctrl_num);
1256 			}
1257 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1258 			if (pdimm[1].n_ranks < 2) {
1259 				popts->ba_intlv_ctl = 0;
1260 				printf("Not enough bank(chip-select) for CS2+CS3 "
1261 					"on controller %d, interleaving disabled!\n", ctrl_num);
1262 			}
1263 #endif
1264 			break;
1265 		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
1266 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1267 			if (pdimm[0].n_ranks < 4) {
1268 				popts->ba_intlv_ctl = 0;
1269 				printf("Not enough bank(CS) for CS0+CS1 and "
1270 					"CS2+CS3 on controller %d, "
1271 					"interleaving disabled!\n", ctrl_num);
1272 			}
1273 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1274 			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
1275 				popts->ba_intlv_ctl = 0;
1276 				printf("Not enough bank(CS) for CS0+CS1 and "
1277 					"CS2+CS3 on controller %d, "
1278 					"interleaving disabled!\n", ctrl_num);
1279 			}
1280 #endif
1281 			break;
1282 		default:
1283 			popts->ba_intlv_ctl = 0;
1284 			break;
1285 		}
1286 	}
1287 
1288 	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
1289 		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
1290 			popts->addr_hash = 0;
1291 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
1292 					       "true", buf))
1293 			popts->addr_hash = 1;
1294 	}
1295 
1296 	if (pdimm[0].n_ranks == 4)
1297 		popts->quad_rank_present = 1;
1298 
1299 	popts->package_3ds = pdimm->package_3ds;
1300 
1301 #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
1302 	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
1303 	if (popts->registered_dimm_en) {
1304 		popts->rcw_override = 1;
1305 		popts->rcw_1 = 0x000a5a00;
1306 		if (ddr_freq <= 800)
1307 			popts->rcw_2 = 0x00000000;
1308 		else if (ddr_freq <= 1066)
1309 			popts->rcw_2 = 0x00100000;
1310 		else if (ddr_freq <= 1333)
1311 			popts->rcw_2 = 0x00200000;
1312 		else
1313 			popts->rcw_2 = 0x00300000;
1314 	}
1315 #endif
1316 
1317 	fsl_ddr_board_options(popts, pdimm, ctrl_num);
1318 
1319 	return 0;
1320 }
1321 
1322 void check_interleaving_options(fsl_ddr_info_t *pinfo)
1323 {
1324 	int i, j, k, check_n_ranks, intlv_invalid = 0;
1325 	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
1326 	unsigned long long check_rank_density;
1327 	struct dimm_params_s *dimm;
1328 	int first_ctrl = pinfo->first_ctrl;
1329 	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
1330 
1331 	/*
1332 	 * Check whether all controllers are configured for memory
1333 	 * controller interleaving.  Identical DIMMs are recommended; at a
1334 	 * minimum the size and the row and column addresses are checked.
1335 	 */
1336 	j = 0;
1337 	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
1338 	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
1339 	check_n_row_addr =  pinfo->dimm_params[first_ctrl][0].n_row_addr;
1340 	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
1341 	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
1342 	for (i = first_ctrl; i <= last_ctrl; i++) {
1343 		dimm = &pinfo->dimm_params[i][0];
1344 		if (!pinfo->memctl_opts[i].memctl_interleaving) {
1345 			continue;
1346 		} else if (((check_rank_density != dimm->rank_density) ||
1347 		     (check_n_ranks != dimm->n_ranks) ||
1348 		     (check_n_row_addr != dimm->n_row_addr) ||
1349 		     (check_n_col_addr != dimm->n_col_addr) ||
1350 		     (check_intlv !=
1351 			pinfo->memctl_opts[i].memctl_interleaving_mode))){
1352 			intlv_invalid = 1;
1353 			break;
1354 		} else {
1355 			j++;
1356 		}
1357 
1358 	}
1359 	if (intlv_invalid) {
1360 		for (i = first_ctrl; i <= last_ctrl; i++)
1361 			pinfo->memctl_opts[i].memctl_interleaving = 0;
1362 		printf("Not all DIMMs are identical. "
1363 			"Memory controller interleaving disabled.\n");
1364 	} else {
1365 		switch (check_intlv) {
1366 		case FSL_DDR_256B_INTERLEAVING:
1367 		case FSL_DDR_CACHE_LINE_INTERLEAVING:
1368 		case FSL_DDR_PAGE_INTERLEAVING:
1369 		case FSL_DDR_BANK_INTERLEAVING:
1370 		case FSL_DDR_SUPERBANK_INTERLEAVING:
1371 #if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
1372 				k = 2;
1373 #else
1374 				k = CONFIG_SYS_NUM_DDR_CTLRS;
1375 #endif
1376 			break;
1377 		case FSL_DDR_3WAY_1KB_INTERLEAVING:
1378 		case FSL_DDR_3WAY_4KB_INTERLEAVING:
1379 		case FSL_DDR_3WAY_8KB_INTERLEAVING:
1380 		case FSL_DDR_4WAY_1KB_INTERLEAVING:
1381 		case FSL_DDR_4WAY_4KB_INTERLEAVING:
1382 		case FSL_DDR_4WAY_8KB_INTERLEAVING:
1383 		default:
1384 			k = CONFIG_SYS_NUM_DDR_CTLRS;
1385 			break;
1386 		}
1387 		debug("%d of %d controllers are interleaving.\n", j, k);
1388 		if (j && (j != k)) {
1389 			for (i = first_ctrl; i <= last_ctrl; i++)
1390 				pinfo->memctl_opts[i].memctl_interleaving = 0;
1391 			if ((last_ctrl - first_ctrl) > 1)
1392 				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
1393 		}
1394 	}
1395 	debug("Checking interleaving options completed\n");
1396 }
1397 
1398 int fsl_use_spd(void)
1399 {
1400 	int use_spd = 0;
1401 
1402 #ifdef CONFIG_DDR_SPD
1403 	char buf[HWCONFIG_BUFFER_SIZE];
1404 
1405 	/*
1406 	 * Extract hwconfig from the environment, since the environment has not
1407 	 * been fully set up yet but we need it for the DDR config parameters.
1408 	 */
1409 	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
1410 		buf[0] = '\0';
1411 
1412 	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
1413 	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
1414 		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
1415 			use_spd = 1;
1416 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
1417 					       "fixed", buf))
1418 			use_spd = 0;
1419 		else
1420 			use_spd = 1;
1421 	} else
1422 		use_spd = 1;
1423 #endif
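	/*
	 * Illustrative example: "hwconfig=fsl_ddr:sdram=fixed" selects the
	 * fixed (non-SPD) configuration, while "sdram=spd", any other value,
	 * or omitting the sub-option falls back to using SPD.
	 */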
1424 
1425 	return use_spd;
1426 }
1427