xref: /openbmc/u-boot/drivers/ddr/fsl/options.c (revision 445277b9d1efcc239c8c05560e4db312ea4f078e)
1  /*
2   * Copyright 2008, 2010-2014 Freescale Semiconductor, Inc.
3   *
4   * SPDX-License-Identifier:	GPL-2.0+
5   */
6  
7  #include <common.h>
8  #include <hwconfig.h>
9  #include <fsl_ddr_sdram.h>
10  
11  #include <fsl_ddr.h>
12  #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
13  	defined(CONFIG_ARM)
14  #include <asm/arch/clock.h>
15  #endif
16  
17  /*
18   * Use our own stack-based buffer before relocation so we can access longer
19   * hwconfig strings that may be in the environment before we have relocated.
20   * This is fragile with respect to both stack usage and whether the buffer is
21   * big enough; however, env_get_f() will warn us about the latter.
22   */
23  
24  /* Board-specific functions defined in each board's ddr.c */
25  extern void fsl_ddr_board_options(memctl_options_t *popts,
26  		dimm_params_t *pdimm,
27  		unsigned int ctrl_num);
28  
29  struct dynamic_odt {
30  	unsigned int odt_rd_cfg;
31  	unsigned int odt_wr_cfg;
32  	unsigned int odt_rtt_norm;
33  	unsigned int odt_rtt_wr;
34  };
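/*
 * Naming convention for the ODT tables below (derived from how they are
 * selected in populate_memctl_options()): "S", "D" and "Q" stand for a
 * single-, dual- or quad-rank DIMM and "0" for an empty slot, while
 * "single_"/"dual_" refers to one or two DIMM slots per controller. For
 * example, dual_DS describes a dual-rank DIMM in slot 0 and a single-rank
 * DIMM in slot 1; odt_unknown is the fallback for unrecognized setups.
 */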
35  
36  #ifdef CONFIG_SYS_FSL_DDR4
37  /* Quad rank is not verified yet due to availability.
38   * Replacing 20 OHM with 34 OHM since DDR4 doesn't have a 20 OHM option.
39   */
40  static __maybe_unused const struct dynamic_odt single_Q[4] = {
41  	{	/* cs0 */
42  		FSL_DDR_ODT_NEVER,
43  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
44  		DDR4_RTT_34_OHM,	/* unverified */
45  		DDR4_RTT_120_OHM
46  	},
47  	{	/* cs1 */
48  		FSL_DDR_ODT_NEVER,
49  		FSL_DDR_ODT_NEVER,
50  		DDR4_RTT_OFF,
51  		DDR4_RTT_120_OHM
52  	},
53  	{	/* cs2 */
54  		FSL_DDR_ODT_NEVER,
55  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
56  		DDR4_RTT_34_OHM,
57  		DDR4_RTT_120_OHM
58  	},
59  	{	/* cs3 */
60  		FSL_DDR_ODT_NEVER,
61  		FSL_DDR_ODT_NEVER,	/* tied high */
62  		DDR4_RTT_OFF,
63  		DDR4_RTT_120_OHM
64  	}
65  };
66  
67  static __maybe_unused const struct dynamic_odt single_D[4] = {
68  	{	/* cs0 */
69  		FSL_DDR_ODT_NEVER,
70  		FSL_DDR_ODT_ALL,
71  		DDR4_RTT_40_OHM,
72  		DDR4_RTT_OFF
73  	},
74  	{	/* cs1 */
75  		FSL_DDR_ODT_NEVER,
76  		FSL_DDR_ODT_NEVER,
77  		DDR4_RTT_OFF,
78  		DDR4_RTT_OFF
79  	},
80  	{0, 0, 0, 0},
81  	{0, 0, 0, 0}
82  };
83  
84  static __maybe_unused const struct dynamic_odt single_S[4] = {
85  	{	/* cs0 */
86  		FSL_DDR_ODT_NEVER,
87  		FSL_DDR_ODT_ALL,
88  		DDR4_RTT_40_OHM,
89  		DDR4_RTT_OFF
90  	},
91  	{0, 0, 0, 0},
92  	{0, 0, 0, 0},
93  	{0, 0, 0, 0},
94  };
95  
96  static __maybe_unused const struct dynamic_odt dual_DD[4] = {
97  	{	/* cs0 */
98  		FSL_DDR_ODT_NEVER,
99  		FSL_DDR_ODT_SAME_DIMM,
100  		DDR4_RTT_120_OHM,
101  		DDR4_RTT_OFF
102  	},
103  	{	/* cs1 */
104  		FSL_DDR_ODT_OTHER_DIMM,
105  		FSL_DDR_ODT_OTHER_DIMM,
106  		DDR4_RTT_34_OHM,
107  		DDR4_RTT_OFF
108  	},
109  	{	/* cs2 */
110  		FSL_DDR_ODT_NEVER,
111  		FSL_DDR_ODT_SAME_DIMM,
112  		DDR4_RTT_120_OHM,
113  		DDR4_RTT_OFF
114  	},
115  	{	/* cs3 */
116  		FSL_DDR_ODT_OTHER_DIMM,
117  		FSL_DDR_ODT_OTHER_DIMM,
118  		DDR4_RTT_34_OHM,
119  		DDR4_RTT_OFF
120  	}
121  };
122  
123  static __maybe_unused const struct dynamic_odt dual_DS[4] = {
124  	{	/* cs0 */
125  		FSL_DDR_ODT_NEVER,
126  		FSL_DDR_ODT_SAME_DIMM,
127  		DDR4_RTT_120_OHM,
128  		DDR4_RTT_OFF
129  	},
130  	{	/* cs1 */
131  		FSL_DDR_ODT_OTHER_DIMM,
132  		FSL_DDR_ODT_OTHER_DIMM,
133  		DDR4_RTT_34_OHM,
134  		DDR4_RTT_OFF
135  	},
136  	{	/* cs2 */
137  		FSL_DDR_ODT_OTHER_DIMM,
138  		FSL_DDR_ODT_ALL,
139  		DDR4_RTT_34_OHM,
140  		DDR4_RTT_120_OHM
141  	},
142  	{0, 0, 0, 0}
143  };
144  static __maybe_unused const struct dynamic_odt dual_SD[4] = {
145  	{	/* cs0 */
146  		FSL_DDR_ODT_OTHER_DIMM,
147  		FSL_DDR_ODT_ALL,
148  		DDR4_RTT_34_OHM,
149  		DDR4_RTT_120_OHM
150  	},
151  	{0, 0, 0, 0},
152  	{	/* cs2 */
153  		FSL_DDR_ODT_NEVER,
154  		FSL_DDR_ODT_SAME_DIMM,
155  		DDR4_RTT_120_OHM,
156  		DDR4_RTT_OFF
157  	},
158  	{	/* cs3 */
159  		FSL_DDR_ODT_OTHER_DIMM,
160  		FSL_DDR_ODT_OTHER_DIMM,
161  		DDR4_RTT_34_OHM,
162  		DDR4_RTT_OFF
163  	}
164  };
165  
166  static __maybe_unused const struct dynamic_odt dual_SS[4] = {
167  	{	/* cs0 */
168  		FSL_DDR_ODT_OTHER_DIMM,
169  		FSL_DDR_ODT_ALL,
170  		DDR4_RTT_34_OHM,
171  		DDR4_RTT_120_OHM
172  	},
173  	{0, 0, 0, 0},
174  	{	/* cs2 */
175  		FSL_DDR_ODT_OTHER_DIMM,
176  		FSL_DDR_ODT_ALL,
177  		DDR4_RTT_34_OHM,
178  		DDR4_RTT_120_OHM
179  	},
180  	{0, 0, 0, 0}
181  };
182  
183  static __maybe_unused const struct dynamic_odt dual_D0[4] = {
184  	{	/* cs0 */
185  		FSL_DDR_ODT_NEVER,
186  		FSL_DDR_ODT_SAME_DIMM,
187  		DDR4_RTT_40_OHM,
188  		DDR4_RTT_OFF
189  	},
190  	{	/* cs1 */
191  		FSL_DDR_ODT_NEVER,
192  		FSL_DDR_ODT_NEVER,
193  		DDR4_RTT_OFF,
194  		DDR4_RTT_OFF
195  	},
196  	{0, 0, 0, 0},
197  	{0, 0, 0, 0}
198  };
199  
200  static __maybe_unused const struct dynamic_odt dual_0D[4] = {
201  	{0, 0, 0, 0},
202  	{0, 0, 0, 0},
203  	{	/* cs2 */
204  		FSL_DDR_ODT_NEVER,
205  		FSL_DDR_ODT_SAME_DIMM,
206  		DDR4_RTT_40_OHM,
207  		DDR4_RTT_OFF
208  	},
209  	{	/* cs3 */
210  		FSL_DDR_ODT_NEVER,
211  		FSL_DDR_ODT_NEVER,
212  		DDR4_RTT_OFF,
213  		DDR4_RTT_OFF
214  	}
215  };
216  
217  static __maybe_unused const struct dynamic_odt dual_S0[4] = {
218  	{	/* cs0 */
219  		FSL_DDR_ODT_NEVER,
220  		FSL_DDR_ODT_CS,
221  		DDR4_RTT_40_OHM,
222  		DDR4_RTT_OFF
223  	},
224  	{0, 0, 0, 0},
225  	{0, 0, 0, 0},
226  	{0, 0, 0, 0}
227  
228  };
229  
230  static __maybe_unused const struct dynamic_odt dual_0S[4] = {
231  	{0, 0, 0, 0},
232  	{0, 0, 0, 0},
233  	{	/* cs2 */
234  		FSL_DDR_ODT_NEVER,
235  		FSL_DDR_ODT_CS,
236  		DDR4_RTT_40_OHM,
237  		DDR4_RTT_OFF
238  	},
239  	{0, 0, 0, 0}
240  
241  };
242  
243  static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
244  	{	/* cs0 */
245  		FSL_DDR_ODT_NEVER,
246  		FSL_DDR_ODT_CS,
247  		DDR4_RTT_120_OHM,
248  		DDR4_RTT_OFF
249  	},
250  	{	/* cs1 */
251  		FSL_DDR_ODT_NEVER,
252  		FSL_DDR_ODT_CS,
253  		DDR4_RTT_120_OHM,
254  		DDR4_RTT_OFF
255  	},
256  	{	/* cs2 */
257  		FSL_DDR_ODT_NEVER,
258  		FSL_DDR_ODT_CS,
259  		DDR4_RTT_120_OHM,
260  		DDR4_RTT_OFF
261  	},
262  	{	/* cs3 */
263  		FSL_DDR_ODT_NEVER,
264  		FSL_DDR_ODT_CS,
265  		DDR4_RTT_120_OHM,
266  		DDR4_RTT_OFF
267  	}
268  };
269  #elif defined(CONFIG_SYS_FSL_DDR3)
270  static __maybe_unused const struct dynamic_odt single_Q[4] = {
271  	{	/* cs0 */
272  		FSL_DDR_ODT_NEVER,
273  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
274  		DDR3_RTT_20_OHM,
275  		DDR3_RTT_120_OHM
276  	},
277  	{	/* cs1 */
278  		FSL_DDR_ODT_NEVER,
279  		FSL_DDR_ODT_NEVER,	/* tied high */
280  		DDR3_RTT_OFF,
281  		DDR3_RTT_120_OHM
282  	},
283  	{	/* cs2 */
284  		FSL_DDR_ODT_NEVER,
285  		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
286  		DDR3_RTT_20_OHM,
287  		DDR3_RTT_120_OHM
288  	},
289  	{	/* cs3 */
290  		FSL_DDR_ODT_NEVER,
291  		FSL_DDR_ODT_NEVER,	/* tied high */
292  		DDR3_RTT_OFF,
293  		DDR3_RTT_120_OHM
294  	}
295  };
296  
297  static __maybe_unused const struct dynamic_odt single_D[4] = {
298  	{	/* cs0 */
299  		FSL_DDR_ODT_NEVER,
300  		FSL_DDR_ODT_ALL,
301  		DDR3_RTT_40_OHM,
302  		DDR3_RTT_OFF
303  	},
304  	{	/* cs1 */
305  		FSL_DDR_ODT_NEVER,
306  		FSL_DDR_ODT_NEVER,
307  		DDR3_RTT_OFF,
308  		DDR3_RTT_OFF
309  	},
310  	{0, 0, 0, 0},
311  	{0, 0, 0, 0}
312  };
313  
314  static __maybe_unused const struct dynamic_odt single_S[4] = {
315  	{	/* cs0 */
316  		FSL_DDR_ODT_NEVER,
317  		FSL_DDR_ODT_ALL,
318  		DDR3_RTT_40_OHM,
319  		DDR3_RTT_OFF
320  	},
321  	{0, 0, 0, 0},
322  	{0, 0, 0, 0},
323  	{0, 0, 0, 0},
324  };
325  
326  static __maybe_unused const struct dynamic_odt dual_DD[4] = {
327  	{	/* cs0 */
328  		FSL_DDR_ODT_NEVER,
329  		FSL_DDR_ODT_SAME_DIMM,
330  		DDR3_RTT_120_OHM,
331  		DDR3_RTT_OFF
332  	},
333  	{	/* cs1 */
334  		FSL_DDR_ODT_OTHER_DIMM,
335  		FSL_DDR_ODT_OTHER_DIMM,
336  		DDR3_RTT_30_OHM,
337  		DDR3_RTT_OFF
338  	},
339  	{	/* cs2 */
340  		FSL_DDR_ODT_NEVER,
341  		FSL_DDR_ODT_SAME_DIMM,
342  		DDR3_RTT_120_OHM,
343  		DDR3_RTT_OFF
344  	},
345  	{	/* cs3 */
346  		FSL_DDR_ODT_OTHER_DIMM,
347  		FSL_DDR_ODT_OTHER_DIMM,
348  		DDR3_RTT_30_OHM,
349  		DDR3_RTT_OFF
350  	}
351  };
352  
353  static __maybe_unused const struct dynamic_odt dual_DS[4] = {
354  	{	/* cs0 */
355  		FSL_DDR_ODT_NEVER,
356  		FSL_DDR_ODT_SAME_DIMM,
357  		DDR3_RTT_120_OHM,
358  		DDR3_RTT_OFF
359  	},
360  	{	/* cs1 */
361  		FSL_DDR_ODT_OTHER_DIMM,
362  		FSL_DDR_ODT_OTHER_DIMM,
363  		DDR3_RTT_30_OHM,
364  		DDR3_RTT_OFF
365  	},
366  	{	/* cs2 */
367  		FSL_DDR_ODT_OTHER_DIMM,
368  		FSL_DDR_ODT_ALL,
369  		DDR3_RTT_20_OHM,
370  		DDR3_RTT_120_OHM
371  	},
372  	{0, 0, 0, 0}
373  };
374  static __maybe_unused const struct dynamic_odt dual_SD[4] = {
375  	{	/* cs0 */
376  		FSL_DDR_ODT_OTHER_DIMM,
377  		FSL_DDR_ODT_ALL,
378  		DDR3_RTT_20_OHM,
379  		DDR3_RTT_120_OHM
380  	},
381  	{0, 0, 0, 0},
382  	{	/* cs2 */
383  		FSL_DDR_ODT_NEVER,
384  		FSL_DDR_ODT_SAME_DIMM,
385  		DDR3_RTT_120_OHM,
386  		DDR3_RTT_OFF
387  	},
388  	{	/* cs3 */
389  		FSL_DDR_ODT_OTHER_DIMM,
390  		FSL_DDR_ODT_OTHER_DIMM,
391  		DDR3_RTT_20_OHM,
392  		DDR3_RTT_OFF
393  	}
394  };
395  
396  static __maybe_unused const struct dynamic_odt dual_SS[4] = {
397  	{	/* cs0 */
398  		FSL_DDR_ODT_OTHER_DIMM,
399  		FSL_DDR_ODT_ALL,
400  		DDR3_RTT_30_OHM,
401  		DDR3_RTT_120_OHM
402  	},
403  	{0, 0, 0, 0},
404  	{	/* cs2 */
405  		FSL_DDR_ODT_OTHER_DIMM,
406  		FSL_DDR_ODT_ALL,
407  		DDR3_RTT_30_OHM,
408  		DDR3_RTT_120_OHM
409  	},
410  	{0, 0, 0, 0}
411  };
412  
413  static __maybe_unused const struct dynamic_odt dual_D0[4] = {
414  	{	/* cs0 */
415  		FSL_DDR_ODT_NEVER,
416  		FSL_DDR_ODT_SAME_DIMM,
417  		DDR3_RTT_40_OHM,
418  		DDR3_RTT_OFF
419  	},
420  	{	/* cs1 */
421  		FSL_DDR_ODT_NEVER,
422  		FSL_DDR_ODT_NEVER,
423  		DDR3_RTT_OFF,
424  		DDR3_RTT_OFF
425  	},
426  	{0, 0, 0, 0},
427  	{0, 0, 0, 0}
428  };
429  
430  static __maybe_unused const struct dynamic_odt dual_0D[4] = {
431  	{0, 0, 0, 0},
432  	{0, 0, 0, 0},
433  	{	/* cs2 */
434  		FSL_DDR_ODT_NEVER,
435  		FSL_DDR_ODT_SAME_DIMM,
436  		DDR3_RTT_40_OHM,
437  		DDR3_RTT_OFF
438  	},
439  	{	/* cs3 */
440  		FSL_DDR_ODT_NEVER,
441  		FSL_DDR_ODT_NEVER,
442  		DDR3_RTT_OFF,
443  		DDR3_RTT_OFF
444  	}
445  };
446  
447  static __maybe_unused const struct dynamic_odt dual_S0[4] = {
448  	{	/* cs0 */
449  		FSL_DDR_ODT_NEVER,
450  		FSL_DDR_ODT_CS,
451  		DDR3_RTT_40_OHM,
452  		DDR3_RTT_OFF
453  	},
454  	{0, 0, 0, 0},
455  	{0, 0, 0, 0},
456  	{0, 0, 0, 0}
457  
458  };
459  
460  static __maybe_unused const struct dynamic_odt dual_0S[4] = {
461  	{0, 0, 0, 0},
462  	{0, 0, 0, 0},
463  	{	/* cs2 */
464  		FSL_DDR_ODT_NEVER,
465  		FSL_DDR_ODT_CS,
466  		DDR3_RTT_40_OHM,
467  		DDR3_RTT_OFF
468  	},
469  	{0, 0, 0, 0}
470  
471  };
472  
473  static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
474  	{	/* cs0 */
475  		FSL_DDR_ODT_NEVER,
476  		FSL_DDR_ODT_CS,
477  		DDR3_RTT_120_OHM,
478  		DDR3_RTT_OFF
479  	},
480  	{	/* cs1 */
481  		FSL_DDR_ODT_NEVER,
482  		FSL_DDR_ODT_CS,
483  		DDR3_RTT_120_OHM,
484  		DDR3_RTT_OFF
485  	},
486  	{	/* cs2 */
487  		FSL_DDR_ODT_NEVER,
488  		FSL_DDR_ODT_CS,
489  		DDR3_RTT_120_OHM,
490  		DDR3_RTT_OFF
491  	},
492  	{	/* cs3 */
493  		FSL_DDR_ODT_NEVER,
494  		FSL_DDR_ODT_CS,
495  		DDR3_RTT_120_OHM,
496  		DDR3_RTT_OFF
497  	}
498  };
499  #else	/* CONFIG_SYS_FSL_DDR3 */
500  static __maybe_unused const struct dynamic_odt single_Q[4] = {
501  	{0, 0, 0, 0},
502  	{0, 0, 0, 0},
503  	{0, 0, 0, 0},
504  	{0, 0, 0, 0}
505  };
506  
507  static __maybe_unused const struct dynamic_odt single_D[4] = {
508  	{	/* cs0 */
509  		FSL_DDR_ODT_NEVER,
510  		FSL_DDR_ODT_ALL,
511  		DDR2_RTT_150_OHM,
512  		DDR2_RTT_OFF
513  	},
514  	{	/* cs1 */
515  		FSL_DDR_ODT_NEVER,
516  		FSL_DDR_ODT_NEVER,
517  		DDR2_RTT_OFF,
518  		DDR2_RTT_OFF
519  	},
520  	{0, 0, 0, 0},
521  	{0, 0, 0, 0}
522  };
523  
524  static __maybe_unused const struct dynamic_odt single_S[4] = {
525  	{	/* cs0 */
526  		FSL_DDR_ODT_NEVER,
527  		FSL_DDR_ODT_ALL,
528  		DDR2_RTT_150_OHM,
529  		DDR2_RTT_OFF
530  	},
531  	{0, 0, 0, 0},
532  	{0, 0, 0, 0},
533  	{0, 0, 0, 0},
534  };
535  
536  static __maybe_unused const struct dynamic_odt dual_DD[4] = {
537  	{	/* cs0 */
538  		FSL_DDR_ODT_OTHER_DIMM,
539  		FSL_DDR_ODT_OTHER_DIMM,
540  		DDR2_RTT_75_OHM,
541  		DDR2_RTT_OFF
542  	},
543  	{	/* cs1 */
544  		FSL_DDR_ODT_NEVER,
545  		FSL_DDR_ODT_NEVER,
546  		DDR2_RTT_OFF,
547  		DDR2_RTT_OFF
548  	},
549  	{	/* cs2 */
550  		FSL_DDR_ODT_OTHER_DIMM,
551  		FSL_DDR_ODT_OTHER_DIMM,
552  		DDR2_RTT_75_OHM,
553  		DDR2_RTT_OFF
554  	},
555  	{	/* cs3 */
556  		FSL_DDR_ODT_NEVER,
557  		FSL_DDR_ODT_NEVER,
558  		DDR2_RTT_OFF,
559  		DDR2_RTT_OFF
560  	}
561  };
562  
563  static __maybe_unused const struct dynamic_odt dual_DS[4] = {
564  	{	/* cs0 */
565  		FSL_DDR_ODT_OTHER_DIMM,
566  		FSL_DDR_ODT_OTHER_DIMM,
567  		DDR2_RTT_75_OHM,
568  		DDR2_RTT_OFF
569  	},
570  	{	/* cs1 */
571  		FSL_DDR_ODT_NEVER,
572  		FSL_DDR_ODT_NEVER,
573  		DDR2_RTT_OFF,
574  		DDR2_RTT_OFF
575  	},
576  	{	/* cs2 */
577  		FSL_DDR_ODT_OTHER_DIMM,
578  		FSL_DDR_ODT_OTHER_DIMM,
579  		DDR2_RTT_75_OHM,
580  		DDR2_RTT_OFF
581  	},
582  	{0, 0, 0, 0}
583  };
584  
585  static __maybe_unused const struct dynamic_odt dual_SD[4] = {
586  	{	/* cs0 */
587  		FSL_DDR_ODT_OTHER_DIMM,
588  		FSL_DDR_ODT_OTHER_DIMM,
589  		DDR2_RTT_75_OHM,
590  		DDR2_RTT_OFF
591  	},
592  	{0, 0, 0, 0},
593  	{	/* cs2 */
594  		FSL_DDR_ODT_OTHER_DIMM,
595  		FSL_DDR_ODT_OTHER_DIMM,
596  		DDR2_RTT_75_OHM,
597  		DDR2_RTT_OFF
598  	},
599  	{	/* cs3 */
600  		FSL_DDR_ODT_NEVER,
601  		FSL_DDR_ODT_NEVER,
602  		DDR2_RTT_OFF,
603  		DDR2_RTT_OFF
604  	}
605  };
606  
607  static __maybe_unused const struct dynamic_odt dual_SS[4] = {
608  	{	/* cs0 */
609  		FSL_DDR_ODT_OTHER_DIMM,
610  		FSL_DDR_ODT_OTHER_DIMM,
611  		DDR2_RTT_75_OHM,
612  		DDR2_RTT_OFF
613  	},
614  	{0, 0, 0, 0},
615  	{	/* cs2 */
616  		FSL_DDR_ODT_OTHER_DIMM,
617  		FSL_DDR_ODT_OTHER_DIMM,
618  		DDR2_RTT_75_OHM,
619  		DDR2_RTT_OFF
620  	},
621  	{0, 0, 0, 0}
622  };
623  
624  static __maybe_unused const struct dynamic_odt dual_D0[4] = {
625  	{	/* cs0 */
626  		FSL_DDR_ODT_NEVER,
627  		FSL_DDR_ODT_ALL,
628  		DDR2_RTT_150_OHM,
629  		DDR2_RTT_OFF
630  	},
631  	{	/* cs1 */
632  		FSL_DDR_ODT_NEVER,
633  		FSL_DDR_ODT_NEVER,
634  		DDR2_RTT_OFF,
635  		DDR2_RTT_OFF
636  	},
637  	{0, 0, 0, 0},
638  	{0, 0, 0, 0}
639  };
640  
641  static __maybe_unused const struct dynamic_odt dual_0D[4] = {
642  	{0, 0, 0, 0},
643  	{0, 0, 0, 0},
644  	{	/* cs2 */
645  		FSL_DDR_ODT_NEVER,
646  		FSL_DDR_ODT_ALL,
647  		DDR2_RTT_150_OHM,
648  		DDR2_RTT_OFF
649  	},
650  	{	/* cs3 */
651  		FSL_DDR_ODT_NEVER,
652  		FSL_DDR_ODT_NEVER,
653  		DDR2_RTT_OFF,
654  		DDR2_RTT_OFF
655  	}
656  };
657  
658  static __maybe_unused const struct dynamic_odt dual_S0[4] = {
659  	{	/* cs0 */
660  		FSL_DDR_ODT_NEVER,
661  		FSL_DDR_ODT_CS,
662  		DDR2_RTT_150_OHM,
663  		DDR2_RTT_OFF
664  	},
665  	{0, 0, 0, 0},
666  	{0, 0, 0, 0},
667  	{0, 0, 0, 0}
668  
669  };
670  
671  static __maybe_unused const struct dynamic_odt dual_0S[4] = {
672  	{0, 0, 0, 0},
673  	{0, 0, 0, 0},
674  	{	/* cs2 */
675  		FSL_DDR_ODT_NEVER,
676  		FSL_DDR_ODT_CS,
677  		DDR2_RTT_150_OHM,
678  		DDR2_RTT_OFF
679  	},
680  	{0, 0, 0, 0}
681  
682  };
683  
684  static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
685  	{	/* cs0 */
686  		FSL_DDR_ODT_NEVER,
687  		FSL_DDR_ODT_CS,
688  		DDR2_RTT_75_OHM,
689  		DDR2_RTT_OFF
690  	},
691  	{	/* cs1 */
692  		FSL_DDR_ODT_NEVER,
693  		FSL_DDR_ODT_NEVER,
694  		DDR2_RTT_OFF,
695  		DDR2_RTT_OFF
696  	},
697  	{	/* cs2 */
698  		FSL_DDR_ODT_NEVER,
699  		FSL_DDR_ODT_CS,
700  		DDR2_RTT_75_OHM,
701  		DDR2_RTT_OFF
702  	},
703  	{	/* cs3 */
704  		FSL_DDR_ODT_NEVER,
705  		FSL_DDR_ODT_NEVER,
706  		DDR2_RTT_OFF,
707  		DDR2_RTT_OFF
708  	}
709  };
710  #endif
711  
712  /*
713   * Automatically select a bank (chip-select) interleaving mode based on the
714   * DIMMs installed, trying cs0_cs1_cs2_cs3 first, then cs0_cs1, then none.
715   * This function only deals with one or two slots per controller.
716   */
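/*
 * For example, with one slot per controller: a quad-rank DIMM selects
 * FSL_DDR_CS0_CS1_CS2_CS3, a dual-rank DIMM selects FSL_DDR_CS0_CS1, and a
 * single-rank DIMM gets no bank interleaving (0).
 */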
717  static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
718  {
719  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
720  	if (pdimm[0].n_ranks == 4)
721  		return FSL_DDR_CS0_CS1_CS2_CS3;
722  	else if (pdimm[0].n_ranks == 2)
723  		return FSL_DDR_CS0_CS1;
724  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
725  #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
726  	if (pdimm[0].n_ranks == 4)
727  		return FSL_DDR_CS0_CS1_CS2_CS3;
728  #endif
729  	if (pdimm[0].n_ranks == 2) {
730  		if (pdimm[1].n_ranks == 2)
731  			return FSL_DDR_CS0_CS1_CS2_CS3;
732  		else
733  			return FSL_DDR_CS0_CS1;
734  	}
735  #endif
736  	return 0;
737  }
738  
739  unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
740  			memctl_options_t *popts,
741  			dimm_params_t *pdimm,
742  			unsigned int ctrl_num)
743  {
744  	unsigned int i;
745  	char buffer[HWCONFIG_BUFFER_SIZE];
746  	char *buf = NULL;
747  #if defined(CONFIG_SYS_FSL_DDR3) || \
748  	defined(CONFIG_SYS_FSL_DDR2) || \
749  	defined(CONFIG_SYS_FSL_DDR4)
750  	const struct dynamic_odt *pdodt = odt_unknown;
751  #endif
752  	ulong ddr_freq;
753  
754  	/*
755  	 * Extract hwconfig from the environment, since the environment is not
756  	 * fully set up yet but we need it for the DDR configuration params.
757  	 */
758  	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
759  		buf = buffer;
760  
761  #if defined(CONFIG_SYS_FSL_DDR3) || \
762  	defined(CONFIG_SYS_FSL_DDR2) || \
763  	defined(CONFIG_SYS_FSL_DDR4)
764  	/* Chip select options. */
765  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
766  	switch (pdimm[0].n_ranks) {
767  	case 1:
768  		pdodt = single_S;
769  		break;
770  	case 2:
771  		pdodt = single_D;
772  		break;
773  	case 4:
774  		pdodt = single_Q;
775  		break;
776  	}
777  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
778  	switch (pdimm[0].n_ranks) {
779  #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
780  	case 4:
781  		pdodt = single_Q;
782  		if (pdimm[1].n_ranks)
783  			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
784  		break;
785  #endif
786  	case 2:
787  		switch (pdimm[1].n_ranks) {
788  		case 2:
789  			pdodt = dual_DD;
790  			break;
791  		case 1:
792  			pdodt = dual_DS;
793  			break;
794  		case 0:
795  			pdodt = dual_D0;
796  			break;
797  		}
798  		break;
799  	case 1:
800  		switch (pdimm[1].n_ranks) {
801  		case 2:
802  			pdodt = dual_SD;
803  			break;
804  		case 1:
805  			pdodt = dual_SS;
806  			break;
807  		case 0:
808  			pdodt = dual_S0;
809  			break;
810  		}
811  		break;
812  	case 0:
813  		switch (pdimm[1].n_ranks) {
814  		case 2:
815  			pdodt = dual_0D;
816  			break;
817  		case 1:
818  			pdodt = dual_0S;
819  			break;
820  		}
821  		break;
822  	}
823  #endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
824  #endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */
825  
826  	/* Pick chip-select local options. */
827  	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
828  #if defined(CONFIG_SYS_FSL_DDR3) || \
829  	defined(CONFIG_SYS_FSL_DDR2) || \
830  	defined(CONFIG_SYS_FSL_DDR4)
831  		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
832  		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
833  		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
834  		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
835  #else
836  		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
837  		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
838  #endif
839  		popts->cs_local_opts[i].auto_precharge = 0;
840  	}
841  
842  	/* Pick interleaving mode. */
843  
844  	/*
845  	 * 0 = no interleaving
846  	 * 1 = interleaving between 2 controllers
847  	 */
848  	popts->memctl_interleaving = 0;
849  
850  	/*
851  	 * 0 = cacheline
852  	 * 1 = page
853  	 * 2 = (logical) bank
854  	 * 3 = superbank (only if CS interleaving is enabled)
855  	 */
856  	popts->memctl_interleaving_mode = 0;
857  
858  	/*
859  	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
860  	 * 1: page:      bit to the left of the column bits selects the memctl
861  	 * 2: bank:      bit to the left of the bank bits selects the memctl
862  	 * 3: superbank: bit to the left of the chip select selects the memctl
863  	 *
864  	 * NOTE: ba_intlv (rank interleaving) is independent of memory
865  	 * controller interleaving; it is only within a memory controller.
866  	 * Must use superbank interleaving if rank interleaving is used and
867  	 * memory controller interleaving is enabled.
868  	 */
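	/*
	 * Roughly speaking: with cacheline interleaving across two
	 * controllers, consecutive cache lines of physical memory alternate
	 * between controller 0 and controller 1; page and bank interleaving
	 * do the same at page and bank granularity respectively.
	 */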
869  
870  	/*
871  	 * 0 = no
872  	 * 0x40 = CS0,CS1
873  	 * 0x20 = CS2,CS3
874  	 * 0x60 = CS0,CS1 + CS2,CS3
875  	 * 0x04 = CS0,CS1,CS2,CS3
876  	 */
877  	popts->ba_intlv_ctl = 0;
878  
879  	/* Memory Organization Parameters */
880  	popts->registered_dimm_en = common_dimm->all_dimms_registered;
881  
882  	/* Operational Mode Parameters */
883  
884  	/* Pick ECC modes */
885  	popts->ecc_mode = 0;		  /* 0 = disabled, 1 = enabled */
886  #ifdef CONFIG_DDR_ECC
887  	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
888  		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
889  			popts->ecc_mode = 1;
890  	} else
891  		popts->ecc_mode = 1;
892  #endif
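	/*
	 * Example (assumed hwconfig syntax, see doc/README.fsl-ddr): setting
	 * "hwconfig=fsl_ddr:ecc=off" in the environment leaves ecc_mode at 0;
	 * when the "ecc" option is present, only the value "on" enables ECC,
	 * and when it is absent ECC defaults to enabled.
	 */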
893  	/* 1 = use the memory controller to initialize data */
894  	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
895  
896  	/*
897  	 * Choose DQS config
898  	 * 0 for DDR1
899  	 * 1 for DDR2
900  	 */
901  #if defined(CONFIG_SYS_FSL_DDR1)
902  	popts->dqs_config = 0;
903  #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
904  	popts->dqs_config = 1;
905  #endif
906  
907  	/* Choose self-refresh during sleep. */
908  	popts->self_refresh_in_sleep = 1;
909  
910  	/* Choose dynamic power management mode. */
911  	popts->dynamic_power = 0;
912  
913  	/*
914  	 * Check the first DIMM for the primary SDRAM width, presuming
915  	 * all DIMMs are similar.
916  	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
917  	 */
918  #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
919  	if (pdimm[0].n_ranks != 0) {
920  		if ((pdimm[0].data_width >= 64) && \
921  			(pdimm[0].data_width <= 72))
922  			popts->data_bus_width = 0;
923  		else if ((pdimm[0].data_width >= 32) && \
924  			(pdimm[0].data_width <= 40))
925  			popts->data_bus_width = 1;
926  		else {
927  			panic("Error: data width %u is invalid!\n",
928  				pdimm[0].data_width);
929  		}
930  	}
931  #else
932  	if (pdimm[0].n_ranks != 0) {
933  		if (pdimm[0].primary_sdram_width == 64)
934  			popts->data_bus_width = 0;
935  		else if (pdimm[0].primary_sdram_width == 32)
936  			popts->data_bus_width = 1;
937  		else if (pdimm[0].primary_sdram_width == 16)
938  			popts->data_bus_width = 2;
939  		else {
940  			panic("Error: primary sdram width %u is invalid!\n",
941  				pdimm[0].primary_sdram_width);
942  		}
943  	}
944  #endif
945  
946  	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
947  
948  	/* Choose burst length. */
949  #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
950  #if defined(CONFIG_E500MC)
951  	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
952  	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
953  #else
954  	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
955  		/* 32-bit or 16-bit bus */
956  		popts->otf_burst_chop_en = 0;
957  		popts->burst_length = DDR_BL8;
958  	} else {
959  		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
960  		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
961  	}
962  #endif
963  #else
964  	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
965  #endif
966  
967  	/* Choose ddr controller address mirror mode */
968  #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
969  	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
970  		if (pdimm[i].n_ranks) {
971  			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
972  			break;
973  		}
974  	}
975  #endif
976  
977  	/* Global Timing Parameters. */
978  	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
979  
980  	/* Pick a caslat override. */
981  	popts->cas_latency_override = 0;
982  	popts->cas_latency_override_value = 3;
983  	if (popts->cas_latency_override) {
984  		debug("using caslat override value = %u\n",
985  		       popts->cas_latency_override_value);
986  	}
987  
988  	/* Decide whether to use the computed derated latency */
989  	popts->use_derated_caslat = 0;
990  
991  	/* Choose an additive latency. */
992  	popts->additive_latency_override = 0;
993  	popts->additive_latency_override_value = 3;
994  	if (popts->additive_latency_override) {
995  		debug("using additive latency override value = %u\n",
996  		       popts->additive_latency_override_value);
997  	}
998  
999  	/*
1000  	 * 2T_EN setting
1001  	 *
1002  	 * Factors to consider for 2T_EN:
1003  	 *	- number of DIMMs installed
1004  	 *	- number of components, number of active ranks
1005  	 *	- how much time you want to spend playing around
1006  	 */
1007  	popts->twot_en = 0;
1008  	popts->threet_en = 0;
1009  
1010  	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
1011  	if (popts->registered_dimm_en)
1012  		popts->ap_en = 1; /* 0 = disable,  1 = enable */
1013  	else
1014  		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
1015  
1016  	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
1017  		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
1018  			if (popts->registered_dimm_en ||
1019  			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
1020  				popts->ap_en = 1;
1021  		}
1022  	}
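	/*
	 * Example (assumed hwconfig syntax): "hwconfig=fsl_ddr:parity=on"
	 * requests address parity; per the check above it only takes effect
	 * for registered DIMMs or DDR4.
	 */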
1023  
1024  	/*
1025  	 * BSTTOPRE precharge interval
1026  	 *
1027  	 * Set this to 0 for global auto precharge. The value 0x100 has been
1028  	 * used for DDR1, DDR2 and DDR3 and is not wrong; almost any value
1029  	 * works, and performance depends on the application, so there is no
1030  	 * single good value for all. One reasonable policy, used here, is
1031  	 * 1/4 of the refint value.
1032  	 */
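	/*
	 * Worked example with illustrative numbers: a 7.8 us refresh interval
	 * at 1600 MT/s (1250 ps MCLK period) gives a refint of roughly
	 * 7800000 / 1250 = 6240 memory clocks, so bstopre is set to about
	 * 6240 / 4 = 1560.
	 */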
1033  	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
1034  			 >> 2;
1035  
1036  	/*
1037  	 * Window for four activates -- tFAW
1038  	 *
1039  	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
1040  	 * FIXME: varies depending upon number of column addresses or data
1041  	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
1042  	 */
1043  #if defined(CONFIG_SYS_FSL_DDR1)
1044  	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
1045  
1046  #elif defined(CONFIG_SYS_FSL_DDR2)
1047  	/*
1048  	 * x4/x8;  some datasheets have 35000
1049  	 * x16 wide columns only?  Use 50000?
1050  	 */
1051  	popts->tfaw_window_four_activates_ps = 37500;
1052  
1053  #else
1054  	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
1055  #endif
1056  	popts->zq_en = 0;
1057  	popts->wrlvl_en = 0;
1058  #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
1059  	/*
1060  	 * Because DDR3/DDR4 DIMMs use a fly-by topology, write leveling is
1061  	 * enabled to help meet the tQDSS requirement under different
1062  	 * loading conditions.
1063  	 */
1064  	popts->wrlvl_en = 1;
1065  	popts->zq_en = 1;
1066  	popts->wrlvl_override = 0;
1067  #endif
1068  
1069  	/*
1070  	 * Check interleaving configuration from environment.
1071  	 * Please refer to doc/README.fsl-ddr for details.
1072  	 *
1073  	 * If memory controller interleaving is enabled, then the data
1074  	 * bus widths must be programmed identically for all memory controllers.
1075  	 *
1076  	 * Attempt to set all controllers to the same chip select
1077  	 * interleaving mode. It makes a best effort to get the
1078  	 * requested ranks interleaved together such that the result
1079  	 * should be a subset of the requested configuration.
1080  	 *
1081  	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
1082  	 * is mandatory and is enabled here.
1083  	 */
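	/*
	 * Example (assumed hwconfig syntax, see doc/README.fsl-ddr):
	 * "hwconfig=fsl_ddr:ctlr_intlv=cacheline" interleaves the controllers
	 * at cache-line granularity, while "ctlr_intlv=null" disables
	 * controller interleaving; the accepted values mirror the strings
	 * parsed below.
	 */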
1084  #if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
1085  	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
1086  #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1087  		;
1088  #else
1089  		goto done;
1090  #endif
1091  	if (pdimm[0].n_ranks == 0) {
1092  		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
1093  		popts->memctl_interleaving = 0;
1094  		goto done;
1095  	}
1096  	popts->memctl_interleaving = 1;
1097  #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1098  	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
1099  	popts->memctl_interleaving = 1;
1100  	debug("256 Byte interleaving\n");
1101  #else
1102  	/*
1103  	 * Test "null" first. If CONFIG_HWCONFIG is not defined,
1104  	 * hwconfig_subarg_cmp_f() returns non-zero.
1105  	 */
1106  	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
1107  				    "null", buf)) {
1108  		popts->memctl_interleaving = 0;
1109  		debug("memory controller interleaving disabled.\n");
1110  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1111  					"ctlr_intlv",
1112  					"cacheline", buf)) {
1113  		popts->memctl_interleaving_mode =
1114  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1115  			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
1116  		popts->memctl_interleaving =
1117  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1118  			0 : 1;
1119  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1120  					"ctlr_intlv",
1121  					"page", buf)) {
1122  		popts->memctl_interleaving_mode =
1123  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1124  			0 : FSL_DDR_PAGE_INTERLEAVING;
1125  		popts->memctl_interleaving =
1126  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1127  			0 : 1;
1128  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1129  					"ctlr_intlv",
1130  					"bank", buf)) {
1131  		popts->memctl_interleaving_mode =
1132  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1133  			0 : FSL_DDR_BANK_INTERLEAVING;
1134  		popts->memctl_interleaving =
1135  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1136  			0 : 1;
1137  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1138  					"ctlr_intlv",
1139  					"superbank", buf)) {
1140  		popts->memctl_interleaving_mode =
1141  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1142  			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
1143  		popts->memctl_interleaving =
1144  			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1145  			0 : 1;
1146  #if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
1147  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1148  					"ctlr_intlv",
1149  					"3way_1KB", buf)) {
1150  		popts->memctl_interleaving_mode =
1151  			FSL_DDR_3WAY_1KB_INTERLEAVING;
1152  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1153  					"ctlr_intlv",
1154  					"3way_4KB", buf)) {
1155  		popts->memctl_interleaving_mode =
1156  			FSL_DDR_3WAY_4KB_INTERLEAVING;
1157  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1158  					"ctlr_intlv",
1159  					"3way_8KB", buf)) {
1160  		popts->memctl_interleaving_mode =
1161  			FSL_DDR_3WAY_8KB_INTERLEAVING;
1162  #elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
1163  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1164  					"ctlr_intlv",
1165  					"4way_1KB", buf)) {
1166  		popts->memctl_interleaving_mode =
1167  			FSL_DDR_4WAY_1KB_INTERLEAVING;
1168  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1169  					"ctlr_intlv",
1170  					"4way_4KB", buf)) {
1171  		popts->memctl_interleaving_mode =
1172  			FSL_DDR_4WAY_4KB_INTERLEAVING;
1173  	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1174  					"ctlr_intlv",
1175  					"4way_8KB", buf)) {
1176  		popts->memctl_interleaving_mode =
1177  			FSL_DDR_4WAY_8KB_INTERLEAVING;
1178  #endif
1179  	} else {
1180  		popts->memctl_interleaving = 0;
1181  		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
1182  	}
1183  #endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
1184  done:
1185  #endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
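	/*
	 * Example (assumed hwconfig syntax): "hwconfig=fsl_ddr:bank_intlv=auto"
	 * lets auto_bank_intlv() pick a chip-select interleaving mode from the
	 * installed DIMMs; explicit values such as "cs0_cs1" or
	 * "cs0_cs1_cs2_cs3" are validated against the rank counts below.
	 */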
1186  	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
1187  		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
1188  		/* Test "null" first. If CONFIG_HWCONFIG is not defined,
1189  		 * hwconfig_subarg_cmp_f() returns non-zero. */
1190  		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1191  					    "null", buf))
1192  			debug("bank interleaving disabled.\n");
1193  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1194  						 "cs0_cs1", buf))
1195  			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
1196  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1197  						 "cs2_cs3", buf))
1198  			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
1199  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1200  						 "cs0_cs1_and_cs2_cs3", buf))
1201  			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
1202  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1203  						 "cs0_cs1_cs2_cs3", buf))
1204  			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
1205  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1206  						"auto", buf))
1207  			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
1208  		else
1209  			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
1210  		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
1211  		case FSL_DDR_CS0_CS1_CS2_CS3:
1212  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1213  			if (pdimm[0].n_ranks < 4) {
1214  				popts->ba_intlv_ctl = 0;
1215  				printf("Not enough bank(chip-select) for "
1216  					"CS0+CS1+CS2+CS3 on controller %d, "
1217  					"interleaving disabled!\n", ctrl_num);
1218  			}
1219  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1220  #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
1221  			if (pdimm[0].n_ranks == 4)
1222  				break;
1223  #endif
1224  			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
1225  				popts->ba_intlv_ctl = 0;
1226  				printf("Not enough bank(chip-select) for "
1227  					"CS0+CS1+CS2+CS3 on controller %d, "
1228  					"interleaving disabled!\n", ctrl_num);
1229  			}
1230  			if (pdimm[0].capacity != pdimm[1].capacity) {
1231  				popts->ba_intlv_ctl = 0;
1232  				printf("Not identical DIMM size for "
1233  					"CS0+CS1+CS2+CS3 on controller %d, "
1234  					"interleaving disabled!\n", ctrl_num);
1235  			}
1236  #endif
1237  			break;
1238  		case FSL_DDR_CS0_CS1:
1239  			if (pdimm[0].n_ranks < 2) {
1240  				popts->ba_intlv_ctl = 0;
1241  				printf("Not enough bank(chip-select) for "
1242  					"CS0+CS1 on controller %d, "
1243  					"interleaving disabled!\n", ctrl_num);
1244  			}
1245  			break;
1246  		case FSL_DDR_CS2_CS3:
1247  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1248  			if (pdimm[0].n_ranks < 4) {
1249  				popts->ba_intlv_ctl = 0;
1250  				printf("Not enough bank(chip-select) for CS2+CS3 "
1251  					"on controller %d, interleaving disabled!\n", ctrl_num);
1252  			}
1253  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1254  			if (pdimm[1].n_ranks < 2) {
1255  				popts->ba_intlv_ctl = 0;
1256  				printf("Not enough bank(chip-select) for CS2+CS3 "
1257  					"on controller %d, interleaving disabled!\n", ctrl_num);
1258  			}
1259  #endif
1260  			break;
1261  		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
1262  #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1263  			if (pdimm[0].n_ranks < 4) {
1264  				popts->ba_intlv_ctl = 0;
1265  				printf("Not enough bank(CS) for CS0+CS1 and "
1266  					"CS2+CS3 on controller %d, "
1267  					"interleaving disabled!\n", ctrl_num);
1268  			}
1269  #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1270  			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
1271  				popts->ba_intlv_ctl = 0;
1272  				printf("Not enough bank(CS) for CS0+CS1 and "
1273  					"CS2+CS3 on controller %d, "
1274  					"interleaving disabled!\n", ctrl_num);
1275  			}
1276  #endif
1277  			break;
1278  		default:
1279  			popts->ba_intlv_ctl = 0;
1280  			break;
1281  		}
1282  	}
1283  
1284  	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
1285  		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
1286  			popts->addr_hash = 0;
1287  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
1288  					       "true", buf))
1289  			popts->addr_hash = 1;
1290  	}
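	/*
	 * Example (assumed hwconfig syntax): "hwconfig=fsl_ddr:addr_hash=true"
	 * enables address hashing, while "addr_hash=null" leaves it disabled.
	 */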
1291  
1292  	if (pdimm[0].n_ranks == 4)
1293  		popts->quad_rank_present = 1;
1294  
1295  	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
1296  	if (popts->registered_dimm_en) {
1297  		popts->rcw_override = 1;
1298  		popts->rcw_1 = 0x000a5a00;
1299  		if (ddr_freq <= 800)
1300  			popts->rcw_2 = 0x00000000;
1301  		else if (ddr_freq <= 1066)
1302  			popts->rcw_2 = 0x00100000;
1303  		else if (ddr_freq <= 1333)
1304  			popts->rcw_2 = 0x00200000;
1305  		else
1306  			popts->rcw_2 = 0x00300000;
1307  	}
1308  
1309  	fsl_ddr_board_options(popts, pdimm, ctrl_num);
1310  
1311  	return 0;
1312  }
1313  
1314  void check_interleaving_options(fsl_ddr_info_t *pinfo)
1315  {
1316  	int i, j, k, check_n_ranks, intlv_invalid = 0;
1317  	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
1318  	unsigned long long check_rank_density;
1319  	struct dimm_params_s *dimm;
1320  	int first_ctrl = pinfo->first_ctrl;
1321  	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
1322  
1323  	/*
1324  	 * Check if all controllers are configured for memory
1325  	 * controller interleaving. Identical DIMMs are recommended; at a
1326  	 * minimum the size and the row and column address widths are checked.
1327  	 */
1328  	j = 0;
1329  	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
1330  	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
1331  	check_n_row_addr =  pinfo->dimm_params[first_ctrl][0].n_row_addr;
1332  	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
1333  	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
1334  	for (i = first_ctrl; i <= last_ctrl; i++) {
1335  		dimm = &pinfo->dimm_params[i][0];
1336  		if (!pinfo->memctl_opts[i].memctl_interleaving) {
1337  			continue;
1338  		} else if (((check_rank_density != dimm->rank_density) ||
1339  		     (check_n_ranks != dimm->n_ranks) ||
1340  		     (check_n_row_addr != dimm->n_row_addr) ||
1341  		     (check_n_col_addr != dimm->n_col_addr) ||
1342  		     (check_intlv !=
1343  			pinfo->memctl_opts[i].memctl_interleaving_mode))){
1344  			intlv_invalid = 1;
1345  			break;
1346  		} else {
1347  			j++;
1348  		}
1349  
1350  	}
1351  	if (intlv_invalid) {
1352  		for (i = first_ctrl; i <= last_ctrl; i++)
1353  			pinfo->memctl_opts[i].memctl_interleaving = 0;
1354  		printf("Not all DIMMs are identical. "
1355  			"Memory controller interleaving disabled.\n");
1356  	} else {
1357  		switch (check_intlv) {
1358  		case FSL_DDR_256B_INTERLEAVING:
1359  		case FSL_DDR_CACHE_LINE_INTERLEAVING:
1360  		case FSL_DDR_PAGE_INTERLEAVING:
1361  		case FSL_DDR_BANK_INTERLEAVING:
1362  		case FSL_DDR_SUPERBANK_INTERLEAVING:
1363  #if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
1364  				k = 2;
1365  #else
1366  				k = CONFIG_SYS_NUM_DDR_CTLRS;
1367  #endif
1368  			break;
1369  		case FSL_DDR_3WAY_1KB_INTERLEAVING:
1370  		case FSL_DDR_3WAY_4KB_INTERLEAVING:
1371  		case FSL_DDR_3WAY_8KB_INTERLEAVING:
1372  		case FSL_DDR_4WAY_1KB_INTERLEAVING:
1373  		case FSL_DDR_4WAY_4KB_INTERLEAVING:
1374  		case FSL_DDR_4WAY_8KB_INTERLEAVING:
1375  		default:
1376  			k = CONFIG_SYS_NUM_DDR_CTLRS;
1377  			break;
1378  		}
1379  		debug("%d of %d controllers are interleaving.\n", j, k);
1380  		if (j && (j != k)) {
1381  			for (i = first_ctrl; i <= last_ctrl; i++)
1382  				pinfo->memctl_opts[i].memctl_interleaving = 0;
1383  			if ((last_ctrl - first_ctrl) > 1)
1384  				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
1385  		}
1386  	}
1387  	debug("Checking interleaving options completed\n");
1388  }
1389  
1390  int fsl_use_spd(void)
1391  {
1392  	int use_spd = 0;
1393  
1394  #ifdef CONFIG_DDR_SPD
1395  	char buffer[HWCONFIG_BUFFER_SIZE];
1396  	char *buf = NULL;
1397  
1398  	/*
1399  	 * Extract hwconfig from the environment, since the environment is not
1400  	 * fully set up yet but we need it for the DDR configuration params.
1401  	 */
1402  	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
1403  		buf = buffer;
1404  
1405  	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
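	/*
	 * Example (assumed hwconfig syntax): "hwconfig=fsl_ddr:sdram=fixed"
	 * selects the board's fixed timing parameters; "sdram=spd", any other
	 * value, or omitting the option falls back to reading SPD.
	 */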
1406  	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
1407  		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
1408  			use_spd = 1;
1409  		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
1410  					       "fixed", buf))
1411  			use_spd = 0;
1412  		else
1413  			use_spd = 1;
1414  	} else
1415  		use_spd = 1;
1416  #endif
1417  
1418  	return use_spd;
1419  }
1420