xref: /openbmc/u-boot/drivers/ddr/fsl/options.c (revision 99bec1aead5927c54f4364bfe10823a86fe0dad2)
1  /*
2   * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
3   * Copyright 2017-2018 NXP Semiconductor
4   *
5   * SPDX-License-Identifier:	GPL-2.0+
6   */
7  
8  #include <common.h>
9  #include <hwconfig.h>
10  #include <fsl_ddr_sdram.h>
11  
12  #include <fsl_ddr.h>
13  #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
14  	defined(CONFIG_ARM)
15  #include <asm/arch/clock.h>
16  #endif
17  
18  /*
19   * Use our own stack-based buffer before relocation to allow access to longer
20   * hwconfig strings that might be in the environment before we've relocated.
21   * This is fragile both in its use of stack space and in whether the buffer is
22   * big enough; however, env_get_f() will at least warn us about the latter.
23   */
24  
25  /* Board-specific functions defined in each board's ddr.c */
26  extern void fsl_ddr_board_options(memctl_options_t *popts,
27  		dimm_params_t *pdimm,
28  		unsigned int ctrl_num);
29  
30  struct dynamic_odt {
31  	unsigned int odt_rd_cfg;
32  	unsigned int odt_wr_cfg;
33  	unsigned int odt_rtt_norm;
34  	unsigned int odt_rtt_wr;
35  };
36  
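/*
 * The tables below give per-chip-select dynamic ODT settings for each
 * supported DIMM population. The array index is the chip select (cs0..cs3).
 * Naming convention: "single"/"dual" is the number of DIMM slots on the
 * controller, and the suffix describes what is plugged into each slot:
 * S = single-rank, D = dual-rank, Q = quad-rank, 0 = empty. For example,
 * dual_DS is a two-slot controller with a dual-rank DIMM in slot 0 and a
 * single-rank DIMM in slot 1. populate_memctl_options() picks one of these
 * tables based on the rank counts reported in the DIMM parameters.
 */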
37  #ifdef CONFIG_SYS_FSL_DDR4
38  /* Quad rank is not verified yet due to limited availability.
39   * 20 OHM is replaced with 34 OHM since DDR4 doesn't have a 20 OHM option.
40   */
41  static __maybe_unused const struct dynamic_odt single_Q[4] = {
42  	{	/* cs0 */
43  		FSL_DDR_ODT_NEVER,
44  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
45  		DDR4_RTT_34_OHM,	/* unverified */
46  		DDR4_RTT_120_OHM
47  	},
48  	{	/* cs1 */
49  		FSL_DDR_ODT_NEVER,
50  		FSL_DDR_ODT_NEVER,
51  		DDR4_RTT_OFF,
52  		DDR4_RTT_120_OHM
53  	},
54  	{	/* cs2 */
55  		FSL_DDR_ODT_NEVER,
56  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
57  		DDR4_RTT_34_OHM,
58  		DDR4_RTT_120_OHM
59  	},
60  	{	/* cs3 */
61  		FSL_DDR_ODT_NEVER,
62  		FSL_DDR_ODT_NEVER,	/* tied high */
63  		DDR4_RTT_OFF,
64  		DDR4_RTT_120_OHM
65  	}
66  };
67  
68  static __maybe_unused const struct dynamic_odt single_D[4] = {
69  	{	/* cs0 */
70  		FSL_DDR_ODT_NEVER,
71  		FSL_DDR_ODT_ALL,
72  		DDR4_RTT_40_OHM,
73  		DDR4_RTT_OFF
74  	},
75  	{	/* cs1 */
76  		FSL_DDR_ODT_NEVER,
77  		FSL_DDR_ODT_NEVER,
78  		DDR4_RTT_OFF,
79  		DDR4_RTT_OFF
80  	},
81  	{0, 0, 0, 0},
82  	{0, 0, 0, 0}
83  };
84  
85  static __maybe_unused const struct dynamic_odt single_S[4] = {
86  	{	/* cs0 */
87  		FSL_DDR_ODT_NEVER,
88  		FSL_DDR_ODT_ALL,
89  		DDR4_RTT_40_OHM,
90  		DDR4_RTT_OFF
91  	},
92  	{0, 0, 0, 0},
93  	{0, 0, 0, 0},
94  	{0, 0, 0, 0},
95  };
96  
97  static __maybe_unused const struct dynamic_odt dual_DD[4] = {
98  	{	/* cs0 */
99  		FSL_DDR_ODT_NEVER,
100  		FSL_DDR_ODT_SAME_DIMM,
101  		DDR4_RTT_120_OHM,
102  		DDR4_RTT_OFF
103  	},
104  	{	/* cs1 */
105  		FSL_DDR_ODT_OTHER_DIMM,
106  		FSL_DDR_ODT_OTHER_DIMM,
107  		DDR4_RTT_34_OHM,
108  		DDR4_RTT_OFF
109  	},
110  	{	/* cs2 */
111  		FSL_DDR_ODT_NEVER,
112  		FSL_DDR_ODT_SAME_DIMM,
113  		DDR4_RTT_120_OHM,
114  		DDR4_RTT_OFF
115  	},
116  	{	/* cs3 */
117  		FSL_DDR_ODT_OTHER_DIMM,
118  		FSL_DDR_ODT_OTHER_DIMM,
119  		DDR4_RTT_34_OHM,
120  		DDR4_RTT_OFF
121  	}
122  };
123  
124  static __maybe_unused const struct dynamic_odt dual_DS[4] = {
125  	{	/* cs0 */
126  		FSL_DDR_ODT_NEVER,
127  		FSL_DDR_ODT_SAME_DIMM,
128  		DDR4_RTT_120_OHM,
129  		DDR4_RTT_OFF
130  	},
131  	{	/* cs1 */
132  		FSL_DDR_ODT_OTHER_DIMM,
133  		FSL_DDR_ODT_OTHER_DIMM,
134  		DDR4_RTT_34_OHM,
135  		DDR4_RTT_OFF
136  	},
137  	{	/* cs2 */
138  		FSL_DDR_ODT_OTHER_DIMM,
139  		FSL_DDR_ODT_ALL,
140  		DDR4_RTT_34_OHM,
141  		DDR4_RTT_120_OHM
142  	},
143  	{0, 0, 0, 0}
144  };
145  static __maybe_unused const struct dynamic_odt dual_SD[4] = {
146  	{	/* cs0 */
147  		FSL_DDR_ODT_OTHER_DIMM,
148  		FSL_DDR_ODT_ALL,
149  		DDR4_RTT_34_OHM,
150  		DDR4_RTT_120_OHM
151  	},
152  	{0, 0, 0, 0},
153  	{	/* cs2 */
154  		FSL_DDR_ODT_NEVER,
155  		FSL_DDR_ODT_SAME_DIMM,
156  		DDR4_RTT_120_OHM,
157  		DDR4_RTT_OFF
158  	},
159  	{	/* cs3 */
160  		FSL_DDR_ODT_OTHER_DIMM,
161  		FSL_DDR_ODT_OTHER_DIMM,
162  		DDR4_RTT_34_OHM,
163  		DDR4_RTT_OFF
164  	}
165  };
166  
167  static __maybe_unused const struct dynamic_odt dual_SS[4] = {
168  	{	/* cs0 */
169  		FSL_DDR_ODT_OTHER_DIMM,
170  		FSL_DDR_ODT_ALL,
171  		DDR4_RTT_34_OHM,
172  		DDR4_RTT_120_OHM
173  	},
174  	{0, 0, 0, 0},
175  	{	/* cs2 */
176  		FSL_DDR_ODT_OTHER_DIMM,
177  		FSL_DDR_ODT_ALL,
178  		DDR4_RTT_34_OHM,
179  		DDR4_RTT_120_OHM
180  	},
181  	{0, 0, 0, 0}
182  };
183  
184  static __maybe_unused const struct dynamic_odt dual_D0[4] = {
185  	{	/* cs0 */
186  		FSL_DDR_ODT_NEVER,
187  		FSL_DDR_ODT_SAME_DIMM,
188  		DDR4_RTT_40_OHM,
189  		DDR4_RTT_OFF
190  	},
191  	{	/* cs1 */
192  		FSL_DDR_ODT_NEVER,
193  		FSL_DDR_ODT_NEVER,
194  		DDR4_RTT_OFF,
195  		DDR4_RTT_OFF
196  	},
197  	{0, 0, 0, 0},
198  	{0, 0, 0, 0}
199  };
200  
201  static __maybe_unused const struct dynamic_odt dual_0D[4] = {
202  	{0, 0, 0, 0},
203  	{0, 0, 0, 0},
204  	{	/* cs2 */
205  		FSL_DDR_ODT_NEVER,
206  		FSL_DDR_ODT_SAME_DIMM,
207  		DDR4_RTT_40_OHM,
208  		DDR4_RTT_OFF
209  	},
210  	{	/* cs3 */
211  		FSL_DDR_ODT_NEVER,
212  		FSL_DDR_ODT_NEVER,
213  		DDR4_RTT_OFF,
214  		DDR4_RTT_OFF
215  	}
216  };
217  
218  static __maybe_unused const struct dynamic_odt dual_S0[4] = {
219  	{	/* cs0 */
220  		FSL_DDR_ODT_NEVER,
221  		FSL_DDR_ODT_CS,
222  		DDR4_RTT_40_OHM,
223  		DDR4_RTT_OFF
224  	},
225  	{0, 0, 0, 0},
226  	{0, 0, 0, 0},
227  	{0, 0, 0, 0}
228  
229  };
230  
231  static __maybe_unused const struct dynamic_odt dual_0S[4] = {
232  	{0, 0, 0, 0},
233  	{0, 0, 0, 0},
234  	{	/* cs2 */
235  		FSL_DDR_ODT_NEVER,
236  		FSL_DDR_ODT_CS,
237  		DDR4_RTT_40_OHM,
238  		DDR4_RTT_OFF
239  	},
240  	{0, 0, 0, 0}
241  
242  };
243  
244  static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
245  	{	/* cs0 */
246  		FSL_DDR_ODT_NEVER,
247  		FSL_DDR_ODT_CS,
248  		DDR4_RTT_120_OHM,
249  		DDR4_RTT_OFF
250  	},
251  	{	/* cs1 */
252  		FSL_DDR_ODT_NEVER,
253  		FSL_DDR_ODT_CS,
254  		DDR4_RTT_120_OHM,
255  		DDR4_RTT_OFF
256  	},
257  	{	/* cs2 */
258  		FSL_DDR_ODT_NEVER,
259  		FSL_DDR_ODT_CS,
260  		DDR4_RTT_120_OHM,
261  		DDR4_RTT_OFF
262  	},
263  	{	/* cs3 */
264  		FSL_DDR_ODT_NEVER,
265  		FSL_DDR_ODT_CS,
266  		DDR4_RTT_120_OHM,
267  		DDR4_RTT_OFF
268  	}
269  };
270  #elif defined(CONFIG_SYS_FSL_DDR3)
271  static __maybe_unused const struct dynamic_odt single_Q[4] = {
272  	{	/* cs0 */
273  		FSL_DDR_ODT_NEVER,
274  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
275  		DDR3_RTT_20_OHM,
276  		DDR3_RTT_120_OHM
277  	},
278  	{	/* cs1 */
279  		FSL_DDR_ODT_NEVER,
280  		FSL_DDR_ODT_NEVER,	/* tied high */
281  		DDR3_RTT_OFF,
282  		DDR3_RTT_120_OHM
283  	},
284  	{	/* cs2 */
285  		FSL_DDR_ODT_NEVER,
286  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
287  		DDR3_RTT_20_OHM,
288  		DDR3_RTT_120_OHM
289  	},
290  	{	/* cs3 */
291  		FSL_DDR_ODT_NEVER,
292  		FSL_DDR_ODT_NEVER,	/* tied high */
293  		DDR3_RTT_OFF,
294  		DDR3_RTT_120_OHM
295  	}
296  };
297  
298  static __maybe_unused const struct dynamic_odt single_D[4] = {
299  	{	/* cs0 */
300  		FSL_DDR_ODT_NEVER,
301  		FSL_DDR_ODT_ALL,
302  		DDR3_RTT_40_OHM,
303  		DDR3_RTT_OFF
304  	},
305  	{	/* cs1 */
306  		FSL_DDR_ODT_NEVER,
307  		FSL_DDR_ODT_NEVER,
308  		DDR3_RTT_OFF,
309  		DDR3_RTT_OFF
310  	},
311  	{0, 0, 0, 0},
312  	{0, 0, 0, 0}
313  };
314  
315  static __maybe_unused const struct dynamic_odt single_S[4] = {
316  	{	/* cs0 */
317  		FSL_DDR_ODT_NEVER,
318  		FSL_DDR_ODT_ALL,
319  		DDR3_RTT_40_OHM,
320  		DDR3_RTT_OFF
321  	},
322  	{0, 0, 0, 0},
323  	{0, 0, 0, 0},
324  	{0, 0, 0, 0},
325  };
326  
327  static __maybe_unused const struct dynamic_odt dual_DD[4] = {
328  	{	/* cs0 */
329  		FSL_DDR_ODT_NEVER,
330  		FSL_DDR_ODT_SAME_DIMM,
331  		DDR3_RTT_120_OHM,
332  		DDR3_RTT_OFF
333  	},
334  	{	/* cs1 */
335  		FSL_DDR_ODT_OTHER_DIMM,
336  		FSL_DDR_ODT_OTHER_DIMM,
337  		DDR3_RTT_30_OHM,
338  		DDR3_RTT_OFF
339  	},
340  	{	/* cs2 */
341  		FSL_DDR_ODT_NEVER,
342  		FSL_DDR_ODT_SAME_DIMM,
343  		DDR3_RTT_120_OHM,
344  		DDR3_RTT_OFF
345  	},
346  	{	/* cs3 */
347  		FSL_DDR_ODT_OTHER_DIMM,
348  		FSL_DDR_ODT_OTHER_DIMM,
349  		DDR3_RTT_30_OHM,
350  		DDR3_RTT_OFF
351  	}
352  };
353  
354  static __maybe_unused const struct dynamic_odt dual_DS[4] = {
355  	{	/* cs0 */
356  		FSL_DDR_ODT_NEVER,
357  		FSL_DDR_ODT_SAME_DIMM,
358  		DDR3_RTT_120_OHM,
359  		DDR3_RTT_OFF
360  	},
361  	{	/* cs1 */
362  		FSL_DDR_ODT_OTHER_DIMM,
363  		FSL_DDR_ODT_OTHER_DIMM,
364  		DDR3_RTT_30_OHM,
365  		DDR3_RTT_OFF
366  	},
367  	{	/* cs2 */
368  		FSL_DDR_ODT_OTHER_DIMM,
369  		FSL_DDR_ODT_ALL,
370  		DDR3_RTT_20_OHM,
371  		DDR3_RTT_120_OHM
372  	},
373  	{0, 0, 0, 0}
374  };
375  static __maybe_unused const struct dynamic_odt dual_SD[4] = {
376  	{	/* cs0 */
377  		FSL_DDR_ODT_OTHER_DIMM,
378  		FSL_DDR_ODT_ALL,
379  		DDR3_RTT_20_OHM,
380  		DDR3_RTT_120_OHM
381  	},
382  	{0, 0, 0, 0},
383  	{	/* cs2 */
384  		FSL_DDR_ODT_NEVER,
385  		FSL_DDR_ODT_SAME_DIMM,
386  		DDR3_RTT_120_OHM,
387  		DDR3_RTT_OFF
388  	},
389  	{	/* cs3 */
390  		FSL_DDR_ODT_OTHER_DIMM,
391  		FSL_DDR_ODT_OTHER_DIMM,
392  		DDR3_RTT_20_OHM,
393  		DDR3_RTT_OFF
394  	}
395  };
396  
397  static __maybe_unused const struct dynamic_odt dual_SS[4] = {
398  	{	/* cs0 */
399  		FSL_DDR_ODT_OTHER_DIMM,
400  		FSL_DDR_ODT_ALL,
401  		DDR3_RTT_30_OHM,
402  		DDR3_RTT_120_OHM
403  	},
404  	{0, 0, 0, 0},
405  	{	/* cs2 */
406  		FSL_DDR_ODT_OTHER_DIMM,
407  		FSL_DDR_ODT_ALL,
408  		DDR3_RTT_30_OHM,
409  		DDR3_RTT_120_OHM
410  	},
411  	{0, 0, 0, 0}
412  };
413  
414  static __maybe_unused const struct dynamic_odt dual_D0[4] = {
415  	{	/* cs0 */
416  		FSL_DDR_ODT_NEVER,
417  		FSL_DDR_ODT_SAME_DIMM,
418  		DDR3_RTT_40_OHM,
419  		DDR3_RTT_OFF
420  	},
421  	{	/* cs1 */
422  		FSL_DDR_ODT_NEVER,
423  		FSL_DDR_ODT_NEVER,
424  		DDR3_RTT_OFF,
425  		DDR3_RTT_OFF
426  	},
427  	{0, 0, 0, 0},
428  	{0, 0, 0, 0}
429  };
430  
431  static __maybe_unused const struct dynamic_odt dual_0D[4] = {
432  	{0, 0, 0, 0},
433  	{0, 0, 0, 0},
434  	{	/* cs2 */
435  		FSL_DDR_ODT_NEVER,
436  		FSL_DDR_ODT_SAME_DIMM,
437  		DDR3_RTT_40_OHM,
438  		DDR3_RTT_OFF
439  	},
440  	{	/* cs3 */
441  		FSL_DDR_ODT_NEVER,
442  		FSL_DDR_ODT_NEVER,
443  		DDR3_RTT_OFF,
444  		DDR3_RTT_OFF
445  	}
446  };
447  
448  static __maybe_unused const struct dynamic_odt dual_S0[4] = {
449  	{	/* cs0 */
450  		FSL_DDR_ODT_NEVER,
451  		FSL_DDR_ODT_CS,
452  		DDR3_RTT_40_OHM,
453  		DDR3_RTT_OFF
454  	},
455  	{0, 0, 0, 0},
456  	{0, 0, 0, 0},
457  	{0, 0, 0, 0}
458  
459  };
460  
461  static __maybe_unused const struct dynamic_odt dual_0S[4] = {
462  	{0, 0, 0, 0},
463  	{0, 0, 0, 0},
464  	{	/* cs2 */
465  		FSL_DDR_ODT_NEVER,
466  		FSL_DDR_ODT_CS,
467  		DDR3_RTT_40_OHM,
468  		DDR3_RTT_OFF
469  	},
470  	{0, 0, 0, 0}
471  
472  };
473  
474  static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
475  	{	/* cs0 */
476  		FSL_DDR_ODT_NEVER,
477  		FSL_DDR_ODT_CS,
478  		DDR3_RTT_120_OHM,
479  		DDR3_RTT_OFF
480  	},
481  	{	/* cs1 */
482  		FSL_DDR_ODT_NEVER,
483  		FSL_DDR_ODT_CS,
484  		DDR3_RTT_120_OHM,
485  		DDR3_RTT_OFF
486  	},
487  	{	/* cs2 */
488  		FSL_DDR_ODT_NEVER,
489  		FSL_DDR_ODT_CS,
490  		DDR3_RTT_120_OHM,
491  		DDR3_RTT_OFF
492  	},
493  	{	/* cs3 */
494  		FSL_DDR_ODT_NEVER,
495  		FSL_DDR_ODT_CS,
496  		DDR3_RTT_120_OHM,
497  		DDR3_RTT_OFF
498  	}
499  };
500  #else	/* CONFIG_SYS_FSL_DDR3 */
501  static __maybe_unused const struct dynamic_odt single_Q[4] = {
502  	{0, 0, 0, 0},
503  	{0, 0, 0, 0},
504  	{0, 0, 0, 0},
505  	{0, 0, 0, 0}
506  };
507  
508  static __maybe_unused const struct dynamic_odt single_D[4] = {
509  	{	/* cs0 */
510  		FSL_DDR_ODT_NEVER,
511  		FSL_DDR_ODT_ALL,
512  		DDR2_RTT_150_OHM,
513  		DDR2_RTT_OFF
514  	},
515  	{	/* cs1 */
516  		FSL_DDR_ODT_NEVER,
517  		FSL_DDR_ODT_NEVER,
518  		DDR2_RTT_OFF,
519  		DDR2_RTT_OFF
520  	},
521  	{0, 0, 0, 0},
522  	{0, 0, 0, 0}
523  };
524  
525  static __maybe_unused const struct dynamic_odt single_S[4] = {
526  	{	/* cs0 */
527  		FSL_DDR_ODT_NEVER,
528  		FSL_DDR_ODT_ALL,
529  		DDR2_RTT_150_OHM,
530  		DDR2_RTT_OFF
531  	},
532  	{0, 0, 0, 0},
533  	{0, 0, 0, 0},
534  	{0, 0, 0, 0},
535  };
536  
537  static __maybe_unused const struct dynamic_odt dual_DD[4] = {
538  	{	/* cs0 */
539  		FSL_DDR_ODT_OTHER_DIMM,
540  		FSL_DDR_ODT_OTHER_DIMM,
541  		DDR2_RTT_75_OHM,
542  		DDR2_RTT_OFF
543  	},
544  	{	/* cs1 */
545  		FSL_DDR_ODT_NEVER,
546  		FSL_DDR_ODT_NEVER,
547  		DDR2_RTT_OFF,
548  		DDR2_RTT_OFF
549  	},
550  	{	/* cs2 */
551  		FSL_DDR_ODT_OTHER_DIMM,
552  		FSL_DDR_ODT_OTHER_DIMM,
553  		DDR2_RTT_75_OHM,
554  		DDR2_RTT_OFF
555  	},
556  	{	/* cs3 */
557  		FSL_DDR_ODT_NEVER,
558  		FSL_DDR_ODT_NEVER,
559  		DDR2_RTT_OFF,
560  		DDR2_RTT_OFF
561  	}
562  };
563  
564  static __maybe_unused const struct dynamic_odt dual_DS[4] = {
565  	{	/* cs0 */
566  		FSL_DDR_ODT_OTHER_DIMM,
567  		FSL_DDR_ODT_OTHER_DIMM,
568  		DDR2_RTT_75_OHM,
569  		DDR2_RTT_OFF
570  	},
571  	{	/* cs1 */
572  		FSL_DDR_ODT_NEVER,
573  		FSL_DDR_ODT_NEVER,
574  		DDR2_RTT_OFF,
575  		DDR2_RTT_OFF
576  	},
577  	{	/* cs2 */
578  		FSL_DDR_ODT_OTHER_DIMM,
579  		FSL_DDR_ODT_OTHER_DIMM,
580  		DDR2_RTT_75_OHM,
581  		DDR2_RTT_OFF
582  	},
583  	{0, 0, 0, 0}
584  };
585  
586  static __maybe_unused const struct dynamic_odt dual_SD[4] = {
587  	{	/* cs0 */
588  		FSL_DDR_ODT_OTHER_DIMM,
589  		FSL_DDR_ODT_OTHER_DIMM,
590  		DDR2_RTT_75_OHM,
591  		DDR2_RTT_OFF
592  	},
593  	{0, 0, 0, 0},
594  	{	/* cs2 */
595  		FSL_DDR_ODT_OTHER_DIMM,
596  		FSL_DDR_ODT_OTHER_DIMM,
597  		DDR2_RTT_75_OHM,
598  		DDR2_RTT_OFF
599  	},
600  	{	/* cs3 */
601  		FSL_DDR_ODT_NEVER,
602  		FSL_DDR_ODT_NEVER,
603  		DDR2_RTT_OFF,
604  		DDR2_RTT_OFF
605  	}
606  };
607  
608  static __maybe_unused const struct dynamic_odt dual_SS[4] = {
609  	{	/* cs0 */
610  		FSL_DDR_ODT_OTHER_DIMM,
611  		FSL_DDR_ODT_OTHER_DIMM,
612  		DDR2_RTT_75_OHM,
613  		DDR2_RTT_OFF
614  	},
615  	{0, 0, 0, 0},
616  	{	/* cs2 */
617  		FSL_DDR_ODT_OTHER_DIMM,
618  		FSL_DDR_ODT_OTHER_DIMM,
619  		DDR2_RTT_75_OHM,
620  		DDR2_RTT_OFF
621  	},
622  	{0, 0, 0, 0}
623  };
624  
625  static __maybe_unused const struct dynamic_odt dual_D0[4] = {
626  	{	/* cs0 */
627  		FSL_DDR_ODT_NEVER,
628  		FSL_DDR_ODT_ALL,
629  		DDR2_RTT_150_OHM,
630  		DDR2_RTT_OFF
631  	},
632  	{	/* cs1 */
633  		FSL_DDR_ODT_NEVER,
634  		FSL_DDR_ODT_NEVER,
635  		DDR2_RTT_OFF,
636  		DDR2_RTT_OFF
637  	},
638  	{0, 0, 0, 0},
639  	{0, 0, 0, 0}
640  };
641  
642  static __maybe_unused const struct dynamic_odt dual_0D[4] = {
643  	{0, 0, 0, 0},
644  	{0, 0, 0, 0},
645  	{	/* cs2 */
646  		FSL_DDR_ODT_NEVER,
647  		FSL_DDR_ODT_ALL,
648  		DDR2_RTT_150_OHM,
649  		DDR2_RTT_OFF
650  	},
651  	{	/* cs3 */
652  		FSL_DDR_ODT_NEVER,
653  		FSL_DDR_ODT_NEVER,
654  		DDR2_RTT_OFF,
655  		DDR2_RTT_OFF
656  	}
657  };
658  
659  static __maybe_unused const struct dynamic_odt dual_S0[4] = {
660  	{	/* cs0 */
661  		FSL_DDR_ODT_NEVER,
662  		FSL_DDR_ODT_CS,
663  		DDR2_RTT_150_OHM,
664  		DDR2_RTT_OFF
665  	},
666  	{0, 0, 0, 0},
667  	{0, 0, 0, 0},
668  	{0, 0, 0, 0}
669  
670  };
671  
672  static __maybe_unused const struct dynamic_odt dual_0S[4] = {
673  	{0, 0, 0, 0},
674  	{0, 0, 0, 0},
675  	{	/* cs2 */
676  		FSL_DDR_ODT_NEVER,
677  		FSL_DDR_ODT_CS,
678  		DDR2_RTT_150_OHM,
679  		DDR2_RTT_OFF
680  	},
681  	{0, 0, 0, 0}
682  
683  };
684  
685  static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
686  	{	/* cs0 */
687  		FSL_DDR_ODT_NEVER,
688  		FSL_DDR_ODT_CS,
689  		DDR2_RTT_75_OHM,
690  		DDR2_RTT_OFF
691  	},
692  	{	/* cs1 */
693  		FSL_DDR_ODT_NEVER,
694  		FSL_DDR_ODT_NEVER,
695  		DDR2_RTT_OFF,
696  		DDR2_RTT_OFF
697  	},
698  	{	/* cs2 */
699  		FSL_DDR_ODT_NEVER,
700  		FSL_DDR_ODT_CS,
701  		DDR2_RTT_75_OHM,
702  		DDR2_RTT_OFF
703  	},
704  	{	/* cs3 */
705  		FSL_DDR_ODT_NEVER,
706  		FSL_DDR_ODT_NEVER,
707  		DDR2_RTT_OFF,
708  		DDR2_RTT_OFF
709  	}
710  };
711  #endif
712  
713  /*
714   * Automatically select the bank interleaving mode based on the DIMMs,
715   * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
716   * This function only deals with one or two slots per controller.
717   */
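/*
 * For example: a quad-rank DIMM (where supported), or two dual-rank DIMMs
 * on a two-slot controller, maps to FSL_DDR_CS0_CS1_CS2_CS3; a dual-rank
 * DIMM in the first slot without a second dual-rank DIMM maps to
 * FSL_DDR_CS0_CS1; any other population returns 0 (no bank interleaving).
 */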
718  static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
719  {
720  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
721  	if (pdimm[0].n_ranks == 4)
722  		return FSL_DDR_CS0_CS1_CS2_CS3;
723  	else if (pdimm[0].n_ranks == 2)
724  		return FSL_DDR_CS0_CS1;
725  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
726  #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
727  	if (pdimm[0].n_ranks == 4)
728  		return FSL_DDR_CS0_CS1_CS2_CS3;
729  #endif
730  	if (pdimm[0].n_ranks == 2) {
731  		if (pdimm[1].n_ranks == 2)
732  			return FSL_DDR_CS0_CS1_CS2_CS3;
733  		else
734  			return FSL_DDR_CS0_CS1;
735  	}
736  #endif
737  	return 0;
738  }
739  
740  unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
741  			memctl_options_t *popts,
742  			dimm_params_t *pdimm,
743  			unsigned int ctrl_num)
744  {
745  	unsigned int i;
746  	char buffer[HWCONFIG_BUFFER_SIZE];
747  	char *buf = NULL;
748  #if defined(CONFIG_SYS_FSL_DDR3) || \
749  	defined(CONFIG_SYS_FSL_DDR2) || \
750  	defined(CONFIG_SYS_FSL_DDR4)
751  	const struct dynamic_odt *pdodt = odt_unknown;
752  #endif
753  #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
754  	ulong ddr_freq;
755  #endif
756  
757  	/*
758  	 * Extract hwconfig from the environment since we have not properly
759  	 * set up the environment yet, but need it for the DDR config parameters.
760  	 */
761  	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
762  		buf = buffer;
763  
764  #if defined(CONFIG_SYS_FSL_DDR3) || \
765  	defined(CONFIG_SYS_FSL_DDR2) || \
766  	defined(CONFIG_SYS_FSL_DDR4)
767  	/* Chip select options. */
768  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
769  	switch (pdimm[0].n_ranks) {
770  	case 1:
771  		pdodt = single_S;
772  		break;
773  	case 2:
774  		pdodt = single_D;
775  		break;
776  	case 4:
777  		pdodt = single_Q;
778  		break;
779  	}
780  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
781  	switch (pdimm[0].n_ranks) {
782  #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
783  	case 4:
784  		pdodt = single_Q;
785  		if (pdimm[1].n_ranks)
786  			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
787  		break;
788  #endif
789  	case 2:
790  		switch (pdimm[1].n_ranks) {
791  		case 2:
792  			pdodt = dual_DD;
793  			break;
794  		case 1:
795  			pdodt = dual_DS;
796  			break;
797  		case 0:
798  			pdodt = dual_D0;
799  			break;
800  		}
801  		break;
802  	case 1:
803  		switch (pdimm[1].n_ranks) {
804  		case 2:
805  			pdodt = dual_SD;
806  			break;
807  		case 1:
808  			pdodt = dual_SS;
809  			break;
810  		case 0:
811  			pdodt = dual_S0;
812  			break;
813  		}
814  		break;
815  	case 0:
816  		switch (pdimm[1].n_ranks) {
817  		case 2:
818  			pdodt = dual_0D;
819  			break;
820  		case 1:
821  			pdodt = dual_0S;
822  			break;
823  		}
824  		break;
825  	}
826  #endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
827  #endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */
828  
829  	/* Pick chip-select local options. */
830  	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
831  #if defined(CONFIG_SYS_FSL_DDR3) || \
832  	defined(CONFIG_SYS_FSL_DDR2) || \
833  	defined(CONFIG_SYS_FSL_DDR4)
834  		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
835  		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
836  		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
837  		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
838  #else
839  		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
840  		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
841  #endif
842  		popts->cs_local_opts[i].auto_precharge = 0;
843  	}
844  
845  	/* Pick interleaving mode. */
846  
847  	/*
848  	 * 0 = no interleaving
849  	 * 1 = interleaving between 2 controllers
850  	 */
851  	popts->memctl_interleaving = 0;
852  
853  	/*
854  	 * 0 = cacheline
855  	 * 1 = page
856  	 * 2 = (logical) bank
857  	 * 3 = superbank (only if CS interleaving is enabled)
858  	 */
859  	popts->memctl_interleaving_mode = 0;
860  
861  	/*
862  	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
863  	 * 1: page:      bit to the left of the column bits selects the memctl
864  	 * 2: bank:      bit to the left of the bank bits selects the memctl
865  	 * 3: superbank: bit to the left of the chip select selects the memctl
866  	 *
867  	 * NOTE: ba_intlv (rank interleaving) is independent of memory
868  	 * controller interleaving; it is only within a memory controller.
869  	 * Must use superbank interleaving if rank interleaving is used and
870  	 * memory controller interleaving is enabled.
871  	 */
872  
873  	/*
874  	 * 0 = no
875  	 * 0x40 = CS0,CS1
876  	 * 0x20 = CS2,CS3
877  	 * 0x60 = CS0,CS1 + CS2,CS3
878  	 * 0x04 = CS0,CS1,CS2,CS3
879  	 */
880  	popts->ba_intlv_ctl = 0;
881  
882  	/* Memory Organization Parameters */
883  	popts->registered_dimm_en = common_dimm->all_dimms_registered;
884  
885  	/* Operational Mode Parameters */
886  
887  	/* Pick ECC modes */
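	/*
	 * With CONFIG_DDR_ECC, ECC defaults to enabled below and can be
	 * turned off from the environment, e.g.:
	 *   setenv hwconfig "fsl_ddr:ecc=off"
	 * (any value other than "on" leaves ecc_mode at 0).
	 */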
888  	popts->ecc_mode = 0;		  /* 0 = disabled, 1 = enabled */
889  #ifdef CONFIG_DDR_ECC
890  	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
891  		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
892  			popts->ecc_mode = 1;
893  	} else
894  		popts->ecc_mode = 1;
895  #endif
896  	/* 1 = use the memory controller to initialize data */
897  	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
898  
899  	/*
900  	 * Choose DQS config
901  	 * 0 for DDR1
902  	 * 1 for DDR2
903  	 * 1 for DDR2 and DDR3
904  #if defined(CONFIG_SYS_FSL_DDR1)
905  	popts->dqs_config = 0;
906  #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
907  	popts->dqs_config = 1;
908  #endif
909  
910  	/* Choose self-refresh during sleep. */
911  	popts->self_refresh_in_sleep = 1;
912  
913  	/* Choose dynamic power management mode. */
914  	popts->dynamic_power = 0;
915  
916  	/*
917  	 * Check the first DIMM for the primary SDRAM width,
918  	 * presuming all DIMMs are similar.
919  	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
920  	 */
921  #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
922  	if (pdimm[0].n_ranks != 0) {
923  		if ((pdimm[0].data_width >= 64) && \
924  			(pdimm[0].data_width <= 72))
925  			popts->data_bus_width = 0;
926  		else if ((pdimm[0].data_width >= 32) && \
927  			(pdimm[0].data_width <= 40))
928  			popts->data_bus_width = 1;
929  		else {
930  			panic("Error: data width %u is invalid!\n",
931  				pdimm[0].data_width);
932  		}
933  	}
934  #else
935  	if (pdimm[0].n_ranks != 0) {
936  		if (pdimm[0].primary_sdram_width == 64)
937  			popts->data_bus_width = 0;
938  		else if (pdimm[0].primary_sdram_width == 32)
939  			popts->data_bus_width = 1;
940  		else if (pdimm[0].primary_sdram_width == 16)
941  			popts->data_bus_width = 2;
942  		else {
943  			panic("Error: primary sdram width %u is invalid!\n",
944  				pdimm[0].primary_sdram_width);
945  		}
946  	}
947  #endif
948  
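	/* Enable x4 device support when the first DIMM uses x4 SDRAM devices. */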
949  	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
950  
951  	/* Choose burst length. */
952  #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
953  #if defined(CONFIG_E500MC)
954  	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
955  	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
956  #else
957  	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
958  		/* 32-bit or 16-bit bus */
959  		popts->otf_burst_chop_en = 0;
960  		popts->burst_length = DDR_BL8;
961  	} else {
962  		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
963  		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
964  	}
965  #endif
966  #else
967  	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
968  #endif
969  
970  	/* Choose ddr controller address mirror mode */
971  #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
972  	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
973  		if (pdimm[i].n_ranks) {
974  			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
975  			break;
976  		}
977  	}
978  #endif
979  
980  	/* Global Timing Parameters. */
981  	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
982  
983  	/* Pick a caslat override. */
984  	popts->cas_latency_override = 0;
985  	popts->cas_latency_override_value = 3;
986  	if (popts->cas_latency_override) {
987  		debug("using caslat override value = %u\n",
988  		       popts->cas_latency_override_value);
989  	}
990  
991  	/* Decide whether to use the computed derated latency */
992  	popts->use_derated_caslat = 0;
993  
994  	/* Choose an additive latency. */
995  	popts->additive_latency_override = 0;
996  	popts->additive_latency_override_value = 3;
997  	if (popts->additive_latency_override) {
998  		debug("using additive latency override value = %u\n",
999  		       popts->additive_latency_override_value);
1000  	}
1001  
1002  	/*
1003  	 * 2T_EN setting
1004  	 *
1005  	 * Factors to consider for 2T_EN:
1006  	 *	- number of DIMMs installed
1007  	 *	- number of components, number of active ranks
1008  	 *	- how much time you want to spend playing around
1009  	 */
1010  	popts->twot_en = 0;
1011  	popts->threet_en = 0;
1012  
1013  	/* Address parity enable, for RDIMM and for DDR4 UDIMM/discrete memory */
1014  	if (popts->registered_dimm_en)
1015  		popts->ap_en = 1; /* 0 = disable,  1 = enable */
1016  	else
1017  		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
1018  
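	/*
	 * Address parity can also be requested from the environment, e.g.:
	 *   setenv hwconfig "fsl_ddr:parity=on"
	 * which takes effect for registered DIMMs or DDR4 controllers.
	 */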
1019  	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
1020  		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
1021  			if (popts->registered_dimm_en ||
1022  			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
1023  				popts->ap_en = 1;
1024  		}
1025  	}
1026  
1027  	/*
1028  	 * BSTTOPRE precharge interval
1029  	 *
1030  	 * Set this to 0 for global auto precharge
1031  	 * A value of 0x100 has been used for DDR1, DDR2 and DDR3. It is not
1032  	 * wrong; any value should work, and the resulting performance depends
1033  	 * on the application. There is no single value that is best for all
1034  	 * cases. One approach is to use 1/4 of the refresh interval (refint).
1035  	 */
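	/*
	 * For example, assuming a 7.8 us (7800000 ps) refresh interval and a
	 * 1250 ps memory clock period (DDR3-1600), picos_to_mclk() gives
	 * 6240 clocks and bstopre ends up as 6240 >> 2 = 1560.
	 */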
1036  	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
1037  			 >> 2;
1038  
1039  	/*
1040  	 * Window for four activates -- tFAW
1041  	 *
1042  	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
1043  	 * FIXME: varies depending upon number of column addresses or data
1044  	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
1045  	 */
1046  #if defined(CONFIG_SYS_FSL_DDR1)
1047  	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
1048  
1049  #elif defined(CONFIG_SYS_FSL_DDR2)
1050  	/*
1051  	 * x4/x8;  some datasheets have 35000
1052  	 * x16 wide columns only?  Use 50000?
1053  	 */
1054  	popts->tfaw_window_four_activates_ps = 37500;
1055  
1056  #else
1057  	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
1058  #endif
1059  	popts->zq_en = 0;
1060  	popts->wrlvl_en = 0;
1061  #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
1062  	/*
1063  	 * Because DDR3/DDR4 DIMMs use a fly-by topology, enable write
1064  	 * leveling so that tQDSS can be met under different loading
1065  	 * conditions.
1066  	 */
1067  	popts->wrlvl_en = 1;
1068  	popts->zq_en = 1;
1069  	popts->wrlvl_override = 0;
1070  #endif
1071  
1072  	/*
1073  	 * Check interleaving configuration from environment.
1074  	 * Please refer to doc/README.fsl-ddr for details.
1075  	 *
1076  	 * If memory controller interleaving is enabled, then the data
1077  	 * bus widths must be programmed identically for all memory controllers.
1078  	 *
1079  	 * Attempt to set all controllers to the same chip-select
1080  	 * interleaving mode. A best effort is made to interleave the
1081  	 * requested ranks together, so that the result is a subset of
1082  	 * the requested configuration.
1083  	 *
1084  	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
1085  	 * is mandatory.
1086  	 */
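	/*
	 * A typical setting from the U-Boot prompt looks like:
	 *   setenv hwconfig "fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1"
	 * (see doc/README.fsl-ddr for the full list of accepted values).
	 */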
1087  #if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
1088  	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
1089  #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1090  		;
1091  #else
1092  		goto done;
1093  #endif
1094  	if (pdimm[0].n_ranks == 0) {
1095  		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
1096  		popts->memctl_interleaving = 0;
1097  		goto done;
1098  	}
1099  	popts->memctl_interleaving = 1;
1100  #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1101  	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
1102  	popts->memctl_interleaving = 1;
1103  	debug("256 Byte interleaving\n");
1104  #else
1105  	/*
1106  	 * Test "null" first. If CONFIG_HWCONFIG is not defined,
1107  	 * hwconfig_subarg_cmp_f() returns non-zero.
1108  	 */
1109  	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
1110  				    "null", buf)) {
1111  		popts->memctl_interleaving = 0;
1112  		debug("memory controller interleaving disabled.\n");
1113  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1114  					"ctlr_intlv",
1115  					"cacheline", buf)) {
1116  		popts->memctl_interleaving_mode =
1117  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1118  			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
1119  		popts->memctl_interleaving =
1120  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1121  			0 : 1;
1122  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1123  					"ctlr_intlv",
1124  					"page", buf)) {
1125  		popts->memctl_interleaving_mode =
1126  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1127  			0 : FSL_DDR_PAGE_INTERLEAVING;
1128  		popts->memctl_interleaving =
1129  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1130  			0 : 1;
1131  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1132  					"ctlr_intlv",
1133  					"bank", buf)) {
1134  		popts->memctl_interleaving_mode =
1135  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1136  			0 : FSL_DDR_BANK_INTERLEAVING;
1137  		popts->memctl_interleaving =
1138  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1139  			0 : 1;
1140  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1141  					"ctlr_intlv",
1142  					"superbank", buf)) {
1143  		popts->memctl_interleaving_mode =
1144  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1145  			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
1146  		popts->memctl_interleaving =
1147  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1148  			0 : 1;
1149  #if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
1150  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1151  					"ctlr_intlv",
1152  					"3way_1KB", buf)) {
1153  		popts->memctl_interleaving_mode =
1154  			FSL_DDR_3WAY_1KB_INTERLEAVING;
1155  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1156  					"ctlr_intlv",
1157  					"3way_4KB", buf)) {
1158  		popts->memctl_interleaving_mode =
1159  			FSL_DDR_3WAY_4KB_INTERLEAVING;
1160  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1161  					"ctlr_intlv",
1162  					"3way_8KB", buf)) {
1163  		popts->memctl_interleaving_mode =
1164  			FSL_DDR_3WAY_8KB_INTERLEAVING;
1165  #elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
1166  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1167  					"ctlr_intlv",
1168  					"4way_1KB", buf)) {
1169  		popts->memctl_interleaving_mode =
1170  			FSL_DDR_4WAY_1KB_INTERLEAVING;
1171  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1172  					"ctlr_intlv",
1173  					"4way_4KB", buf)) {
1174  		popts->memctl_interleaving_mode =
1175  			FSL_DDR_4WAY_4KB_INTERLEAVING;
1176  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1177  					"ctlr_intlv",
1178  					"4way_8KB", buf)) {
1179  		popts->memctl_interleaving_mode =
1180  			FSL_DDR_4WAY_8KB_INTERLEAVING;
1181  #endif
1182  	} else {
1183  		popts->memctl_interleaving = 0;
1184  		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
1185  	}
1186  #endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
1187  done:
1188  #endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
1189  	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
1190  		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
1191  		/* test null first. if CONFIG_HWCONFIG is not defined,
1192  		 * hwconfig_subarg_cmp_f returns non-zero */
1193  		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1194  					    "null", buf))
1195  			debug("bank interleaving disabled.\n");
1196  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1197  						 "cs0_cs1", buf))
1198  			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
1199  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1200  						 "cs2_cs3", buf))
1201  			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
1202  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1203  						 "cs0_cs1_and_cs2_cs3", buf))
1204  			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
1205  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1206  						 "cs0_cs1_cs2_cs3", buf))
1207  			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
1208  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1209  						"auto", buf))
1210  			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
1211  		else
1212  			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
1213  		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
1214  		case FSL_DDR_CS0_CS1_CS2_CS3:
1215  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1216  			if (pdimm[0].n_ranks < 4) {
1217  				popts->ba_intlv_ctl = 0;
1218  				printf("Not enough bank(chip-select) for "
1219  					"CS0+CS1+CS2+CS3 on controller %d, "
1220  					"interleaving disabled!\n", ctrl_num);
1221  			}
1222  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1223  #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
1224  			if (pdimm[0].n_ranks == 4)
1225  				break;
1226  #endif
1227  			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
1228  				popts->ba_intlv_ctl = 0;
1229  				printf("Not enough bank(chip-select) for "
1230  					"CS0+CS1+CS2+CS3 on controller %d, "
1231  					"interleaving disabled!\n", ctrl_num);
1232  			}
1233  			if (pdimm[0].capacity != pdimm[1].capacity) {
1234  				popts->ba_intlv_ctl = 0;
1235  				printf("Not identical DIMM size for "
1236  					"CS0+CS1+CS2+CS3 on controller %d, "
1237  					"interleaving disabled!\n", ctrl_num);
1238  			}
1239  #endif
1240  			break;
1241  		case FSL_DDR_CS0_CS1:
1242  			if (pdimm[0].n_ranks < 2) {
1243  				popts->ba_intlv_ctl = 0;
1244  				printf("Not enough bank(chip-select) for "
1245  					"CS0+CS1 on controller %d, "
1246  					"interleaving disabled!\n", ctrl_num);
1247  			}
1248  			break;
1249  		case FSL_DDR_CS2_CS3:
1250  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1251  			if (pdimm[0].n_ranks < 4) {
1252  				popts->ba_intlv_ctl = 0;
1253  				printf("Not enough bank(chip-select) for CS2+CS3 "
1254  					"on controller %d, interleaving disabled!\n", ctrl_num);
1255  			}
1256  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1257  			if (pdimm[1].n_ranks < 2) {
1258  				popts->ba_intlv_ctl = 0;
1259  				printf("Not enough bank(chip-select) for CS2+CS3 "
1260  					"on controller %d, interleaving disabled!\n", ctrl_num);
1261  			}
1262  #endif
1263  			break;
1264  		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
1265  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1266  			if (pdimm[0].n_ranks < 4) {
1267  				popts->ba_intlv_ctl = 0;
1268  				printf("Not enough bank(CS) for CS0+CS1 and "
1269  					"CS2+CS3 on controller %d, "
1270  					"interleaving disabled!\n", ctrl_num);
1271  			}
1272  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1273  			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
1274  				popts->ba_intlv_ctl = 0;
1275  				printf("Not enough bank(CS) for CS0+CS1 and "
1276  					"CS2+CS3 on controller %d, "
1277  					"interleaving disabled!\n", ctrl_num);
1278  			}
1279  #endif
1280  			break;
1281  		default:
1282  			popts->ba_intlv_ctl = 0;
1283  			break;
1284  		}
1285  	}
1286  
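	/*
	 * Address hashing is likewise controlled from the environment, e.g.:
	 *   setenv hwconfig "fsl_ddr:addr_hash=true"
	 * enables it, while "addr_hash=null" leaves it disabled.
	 */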
1287  	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
1288  		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
1289  			popts->addr_hash = 0;
1290  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
1291  					       "true", buf))
1292  			popts->addr_hash = 1;
1293  	}
1294  
1295  	if (pdimm[0].n_ranks == 4)
1296  		popts->quad_rank_present = 1;
1297  
1298  	popts->package_3ds = pdimm->package_3ds;
1299  
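	/*
	 * For registered DIMMs on pre-DDR4 controllers, override the default
	 * register control words (RCWs), with rcw_2 stepped up at higher DDR
	 * frequencies.
	 */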
1300  #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
1301  	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
1302  	if (popts->registered_dimm_en) {
1303  		popts->rcw_override = 1;
1304  		popts->rcw_1 = 0x000a5a00;
1305  		if (ddr_freq <= 800)
1306  			popts->rcw_2 = 0x00000000;
1307  		else if (ddr_freq <= 1066)
1308  			popts->rcw_2 = 0x00100000;
1309  		else if (ddr_freq <= 1333)
1310  			popts->rcw_2 = 0x00200000;
1311  		else
1312  			popts->rcw_2 = 0x00300000;
1313  	}
1314  #endif
1315  
1316  	fsl_ddr_board_options(popts, pdimm, ctrl_num);
1317  
1318  	return 0;
1319  }
1320  
1321  void check_interleaving_options(fsl_ddr_info_t *pinfo)
1322  {
1323  	int i, j, k, check_n_ranks, intlv_invalid = 0;
1324  	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
1325  	unsigned long long check_rank_density;
1326  	struct dimm_params_s *dimm;
1327  	int first_ctrl = pinfo->first_ctrl;
1328  	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
1329  
1330  	/*
1331  	 * Check whether all controllers are configured for memory
1332  	 * controller interleaving. Identical DIMMs are recommended; at a
1333  	 * minimum, the size and the row and column address widths should match.
1334  	 */
1335  	j = 0;
1336  	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
1337  	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
1338  	check_n_row_addr =  pinfo->dimm_params[first_ctrl][0].n_row_addr;
1339  	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
1340  	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
1341  	for (i = first_ctrl; i <= last_ctrl; i++) {
1342  		dimm = &pinfo->dimm_params[i][0];
1343  		if (!pinfo->memctl_opts[i].memctl_interleaving) {
1344  			continue;
1345  		} else if (((check_rank_density != dimm->rank_density) ||
1346  		     (check_n_ranks != dimm->n_ranks) ||
1347  		     (check_n_row_addr != dimm->n_row_addr) ||
1348  		     (check_n_col_addr != dimm->n_col_addr) ||
1349  		     (check_intlv !=
1350  			pinfo->memctl_opts[i].memctl_interleaving_mode))){
1351  			intlv_invalid = 1;
1352  			break;
1353  		} else {
1354  			j++;
1355  		}
1356  
1357  	}
1358  	if (intlv_invalid) {
1359  		for (i = first_ctrl; i <= last_ctrl; i++)
1360  			pinfo->memctl_opts[i].memctl_interleaving = 0;
1361  		printf("Not all DIMMs are identical. "
1362  			"Memory controller interleaving disabled.\n");
1363  	} else {
1364  		switch (check_intlv) {
1365  		case FSL_DDR_256B_INTERLEAVING:
1366  		case FSL_DDR_CACHE_LINE_INTERLEAVING:
1367  		case FSL_DDR_PAGE_INTERLEAVING:
1368  		case FSL_DDR_BANK_INTERLEAVING:
1369  		case FSL_DDR_SUPERBANK_INTERLEAVING:
1370  #if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
1371  				k = 2;
1372  #else
1373  				k = CONFIG_SYS_NUM_DDR_CTLRS;
1374  #endif
1375  			break;
1376  		case FSL_DDR_3WAY_1KB_INTERLEAVING:
1377  		case FSL_DDR_3WAY_4KB_INTERLEAVING:
1378  		case FSL_DDR_3WAY_8KB_INTERLEAVING:
1379  		case FSL_DDR_4WAY_1KB_INTERLEAVING:
1380  		case FSL_DDR_4WAY_4KB_INTERLEAVING:
1381  		case FSL_DDR_4WAY_8KB_INTERLEAVING:
1382  		default:
1383  			k = CONFIG_SYS_NUM_DDR_CTLRS;
1384  			break;
1385  		}
1386  		debug("%d of %d controllers are interleaving.\n", j, k);
1387  		if (j && (j != k)) {
1388  			for (i = first_ctrl; i <= last_ctrl; i++)
1389  				pinfo->memctl_opts[i].memctl_interleaving = 0;
1390  			if ((last_ctrl - first_ctrl) > 1)
1391  				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
1392  		}
1393  	}
1394  	debug("Checking interleaving options completed\n");
1395  }
1396  
1397  int fsl_use_spd(void)
1398  {
1399  	int use_spd = 0;
1400  
1401  #ifdef CONFIG_DDR_SPD
1402  	char buffer[HWCONFIG_BUFFER_SIZE];
1403  	char *buf = NULL;
1404  
1405  	/*
1406  	 * Extract hwconfig from the environment since we have not properly
1407  	 * set up the environment yet, but need it for the DDR config parameters.
1408  	 */
1409  	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
1410  		buf = buffer;
1411  
1412  	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
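	/*
	 * E.g. setenv hwconfig "fsl_ddr:sdram=fixed" selects the board's fixed
	 * (non-SPD) parameters, while "sdram=spd" (or any other value) uses SPD.
	 */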
1413  	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
1414  		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
1415  			use_spd = 1;
1416  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
1417  					       "fixed", buf))
1418  			use_spd = 0;
1419  		else
1420  			use_spd = 1;
1421  	} else
1422  		use_spd = 1;
1423  #endif
1424  
1425  	return use_spd;
1426  }
1427