/*
 * Copyright (C) 2013, Intel Corporation
 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
 *
 * Ported from Intel released Quark UEFI BIOS
 * QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei
 *
 * SPDX-License-Identifier:	Intel
 */

#include <common.h>
#include <pci.h>
#include <asm/arch/device.h>
#include <asm/arch/mrc.h>
#include <asm/arch/msg_port.h>
#include "mrc_util.h"
#include "hte.h"
#include "smc.h"

/* t_rfc values (in picoseconds) per density */
static const uint32_t t_rfc[5] = {
	90000,	/* 512Mb */
	110000,	/* 1Gb */
	160000,	/* 2Gb */
	300000,	/* 4Gb */
	350000,	/* 8Gb */
};

/* t_ck clock period in picoseconds per speed index: 800, 1066, 1333 */
static const uint32_t t_ck[3] = {
	2500,
	1875,
	1500
};

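/*
 * Editor's note (illustrative, not from the original source): the
 * MCEIL() helper used throughout this file is the MRC's round-up
 * division from picoseconds to DRAM clocks. For example, at DDR3-1066
 * (t_ck = 1875 ps) a 4Gb device needs t_rfc = 300000 ps, so
 * MCEIL(300000, 1875) = 160 clocks.
 */
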
/* Global variables */
static const uint16_t ddr_wclk[] = {193, 158};
static const uint16_t ddr_wctl[] = {1, 217};
static const uint16_t ddr_wcmd[] = {1, 220};

#ifdef BACKUP_RCVN
static const uint16_t ddr_rcvn[] = {129, 498};
#endif

#ifdef BACKUP_WDQS
static const uint16_t ddr_wdqs[] = {65, 289};
#endif

#ifdef BACKUP_RDQS
static const uint8_t ddr_rdqs[] = {32, 24};
#endif

#ifdef BACKUP_WDQ
static const uint16_t ddr_wdq[] = {32, 257};
#endif

/* Stop self refresh driven by MCU */
void clear_self_refresh(struct mrc_params *mrc_params)
{
	ENTERFN();

	/* clear the PMSTS Channel Self Refresh bits */
	mrc_write_mask(MEM_CTLR, PMSTS, PMSTS_DISR, PMSTS_DISR);

	LEAVEFN();
}

/* Initialize the timing registers (DTR0..DTR4) in the MCU */
void prog_ddr_timing_control(struct mrc_params *mrc_params)
{
	uint8_t tcl, wl;
	uint8_t trp, trcd, tras, twr, twtr, trrd, trtp, tfaw;
	uint32_t tck;
	u32 dtr0, dtr1, dtr2, dtr3, dtr4;
	u32 tmp1, tmp2;

	ENTERFN();

	/* mcu_init starts */
	mrc_post_code(0x02, 0x00);

	dtr0 = msg_port_read(MEM_CTLR, DTR0);
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	dtr2 = msg_port_read(MEM_CTLR, DTR2);
	dtr3 = msg_port_read(MEM_CTLR, DTR3);
	dtr4 = msg_port_read(MEM_CTLR, DTR4);

	tck = t_ck[mrc_params->ddr_speed];	/* Clock in picoseconds */
	tcl = mrc_params->params.cl;		/* CAS latency in clocks */
	trp = tcl;	/* Per CAT MRC */
	trcd = tcl;	/* Per CAT MRC */
	tras = MCEIL(mrc_params->params.ras, tck);

	/* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
	twr = MCEIL(15000, tck);

	twtr = MCEIL(mrc_params->params.wtr, tck);
	trrd = MCEIL(mrc_params->params.rrd, tck);
	trtp = 4;	/* Valid for 800 and 1066, use 5 for 1333 */
	tfaw = MCEIL(mrc_params->params.faw, tck);

	wl = 5 + mrc_params->ddr_speed;

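	/*
	 * Editor's note (illustrative, not from the original source): the
	 * wl = 5 + speed-index line matches the JEDEC DDR3 CAS write
	 * latencies of 5/6/7 clocks for 800/1066/1333. Worked example for
	 * DDR3-1066 with CL=7: tck = 1875 ps, wl = 6, and the DTR0 fields
	 * below become DFREQ=1, TCL=7-5=2, TRP=2, TRCD=2.
	 */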
	dtr0 &= ~DTR0_DFREQ_MASK;
	dtr0 |= mrc_params->ddr_speed;
	dtr0 &= ~DTR0_TCL_MASK;
	tmp1 = tcl - 5;
	dtr0 |= ((tcl - 5) << 12);
	dtr0 &= ~DTR0_TRP_MASK;
	dtr0 |= ((trp - 5) << 4);	/* 5 bit DRAM Clock */
	dtr0 &= ~DTR0_TRCD_MASK;
	dtr0 |= ((trcd - 5) << 8);	/* 5 bit DRAM Clock */

	dtr1 &= ~DTR1_TWCL_MASK;
	tmp2 = wl - 3;
	dtr1 |= (wl - 3);
	dtr1 &= ~DTR1_TWTP_MASK;
	dtr1 |= ((wl + 4 + twr - 14) << 8);	/* Change to tWTP */
	dtr1 &= ~DTR1_TRTP_MASK;
	dtr1 |= ((MMAX(trtp, 4) - 3) << 28);	/* 4 bit DRAM Clock */
	dtr1 &= ~DTR1_TRRD_MASK;
	dtr1 |= ((trrd - 4) << 24);		/* 4 bit DRAM Clock */
	dtr1 &= ~DTR1_TCMD_MASK;
	dtr1 |= (1 << 4);
	dtr1 &= ~DTR1_TRAS_MASK;
	dtr1 |= ((tras - 14) << 20);		/* 6 bit DRAM Clock */
	dtr1 &= ~DTR1_TFAW_MASK;
	dtr1 |= ((((tfaw + 1) >> 1) - 5) << 16);/* 4 bit DRAM Clock */
	/* Set 4 Clock CAS to CAS delay (multi-burst) */
	dtr1 &= ~DTR1_TCCD_MASK;
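
	/*
	 * Editor's note (a reading of the expressions above, not from the
	 * original source): tWTP is programmed as WL + 4 (burst) + tWR
	 * minus the field's built-in offset of 14, and the tFAW field
	 * stores ((tfaw + 1) >> 1) - 5, i.e. tFAW rounded up to an even
	 * number of clocks, expressed in clock pairs with an offset of 5.
	 */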

	dtr2 &= ~DTR2_TRRDR_MASK;
	dtr2 |= 1;
	dtr2 &= ~DTR2_TWWDR_MASK;
	dtr2 |= (2 << 8);
	dtr2 &= ~DTR2_TRWDR_MASK;
	dtr2 |= (2 << 16);

	dtr3 &= ~DTR3_TWRDR_MASK;
	dtr3 |= 2;
	dtr3 &= ~DTR3_TXXXX_MASK;
	dtr3 |= (2 << 4);

	dtr3 &= ~DTR3_TRWSR_MASK;
	if (mrc_params->ddr_speed == DDRFREQ_800) {
		/* Extended RW delay (+1) */
		dtr3 |= ((tcl - 5 + 1) << 8);
	} else if (mrc_params->ddr_speed == DDRFREQ_1066) {
		/* Extended RW delay (+1) */
		dtr3 |= ((tcl - 5 + 1) << 8);
	}
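	/*
	 * Editor's note: both branches above program the identical value,
	 * so TRWSR only gets the extended (+1) delay at 800 and 1066;
	 * other speeds leave the field cleared.
	 */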

	dtr3 &= ~DTR3_TWRSR_MASK;
	dtr3 |= ((4 + wl + twtr - 11) << 13);

	dtr3 &= ~DTR3_TXP_MASK;
	if (mrc_params->ddr_speed == DDRFREQ_800)
		dtr3 |= ((MMAX(0, 1 - 1)) << 22);
	else
		dtr3 |= ((MMAX(0, 2 - 1)) << 22);

	dtr4 &= ~DTR4_WRODTSTRT_MASK;
	dtr4 |= 1;
	dtr4 &= ~DTR4_WRODTSTOP_MASK;
	dtr4 |= (1 << 4);
	dtr4 &= ~DTR4_XXXX1_MASK;
	dtr4 |= ((1 + tmp1 - tmp2 + 2) << 8);
	dtr4 &= ~DTR4_XXXX2_MASK;
	dtr4 |= ((1 + tmp1 - tmp2 + 2) << 12);
	dtr4 &= ~(DTR4_ODTDIS | DTR4_TRGSTRDIS);

	msg_port_write(MEM_CTLR, DTR0, dtr0);
	msg_port_write(MEM_CTLR, DTR1, dtr1);
	msg_port_write(MEM_CTLR, DTR2, dtr2);
	msg_port_write(MEM_CTLR, DTR3, dtr3);
	msg_port_write(MEM_CTLR, DTR4, dtr4);

	LEAVEFN();
}

/* Configure MCU before JEDEC init sequence */
void prog_decode_before_jedec(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 drfc;
	u32 dcal;
	u32 dsch;
	u32 dpmc0;

	ENTERFN();

	/* Disable power saving features */
	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 |= (DPMC0_CLKGTDIS | DPMC0_DISPWRDN);
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 &= ~DPMC0_DYNSREN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	/* Disable out of order transactions */
	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch |= (DSCH_OOODIS | DSCH_NEWBYPDIS);
	msg_port_write(MEM_CTLR, DSCH, dsch);

	/* Disable issuing the REF command */
	drfc = msg_port_read(MEM_CTLR, DRFC);
	drfc &= ~DRFC_TREFI_MASK;
	msg_port_write(MEM_CTLR, DRFC, drfc);

	/* Disable ZQ calibration short */
	dcal = msg_port_read(MEM_CTLR, DCAL);
	dcal &= ~DCAL_ZQCINT_MASK;
	dcal &= ~DCAL_SRXZQCL_MASK;
	msg_port_write(MEM_CTLR, DCAL, dcal);

	/*
	 * Training is performed in address mode 0, so rank population has
	 * limited impact; however, the simulator complains if a
	 * non-existent rank is enabled.
	 */
	drp = 0;
	if (mrc_params->rank_enables & 1)
		drp |= DRP_RKEN0;
	if (mrc_params->rank_enables & 2)
		drp |= DRP_RKEN1;
	msg_port_write(MEM_CTLR, DRP, drp);

	LEAVEFN();
}

/*
 * After Cold Reset, BIOS should set COLDWAKE bit to 1 before
 * sending the WAKE message to the Dunit.
 *
 * For Standby Exit, or any other mode in which the DRAM is in
 * SR, this bit must be set to 0.
 */
void perform_ddr_reset(struct mrc_params *mrc_params)
{
	ENTERFN();

	/* Set COLDWAKE bit before sending the WAKE message */
	mrc_write_mask(MEM_CTLR, DRMC, DRMC_COLDWAKE, DRMC_COLDWAKE);

	/* Send wake command to DUNIT (MUST be done before JEDEC) */
	dram_wake_command();

	/* Set default value */
	msg_port_write(MEM_CTLR, DRMC,
		       mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0);

	LEAVEFN();
}

/*
 * This function performs some initialization on the DDRIO unit.
 * This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.
 */
void ddrphy_init(struct mrc_params *mrc_params)
{
	uint32_t temp;
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl_grp;	/* byte lane group counter (2 BLs per module) */
	uint8_t bl_divisor = 1;	/* byte lane divisor */
	/* For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333 */
	uint8_t speed = mrc_params->ddr_speed & 3;
	uint8_t cas;
	uint8_t cwl;

	ENTERFN();

	cas = mrc_params->params.cl;
	cwl = 5 + mrc_params->ddr_speed;

	/* ddrphy_init starts */
	mrc_post_code(0x03, 0x00);

	/*
	 * HSD#231531
	 * Make sure IOBUFACT is deasserted before initializing the DDR PHY
	 *
	 * HSD#234845
	 * Make sure WRPTRENABLE is deasserted before initializing the DDR PHY
	 */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* Deassert DDRPHY Initialization Complete */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 20), 1 << 20);	/* SPID_INIT_COMPLETE=0 */
			/* Deassert IOBUFACT */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 2), 1 << 2);	/* IOBUFACTRST_N=0 */
			/* Disable WRPTR */
			mrc_alt_write_mask(DDRPHY,
				CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 0), 1 << 0);	/* WRPTRENABLE=0 */
		}
	}

	/* Put PHY in reset */
	mrc_alt_write_mask(DDRPHY, MASTERRSTN, 0, 1);

	/* Initialize DQ01, DQ23, CMD, CLK-CTL, COMP modules */

	/* STEP0 */
	mrc_post_code(0x03, 0x10);
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				/* Analog MUX select - IO2xCLKSEL */
				mrc_alt_write_mask(DDRPHY,
					DQOBSCKEBBCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					bl_grp ? 0 : (1 << 22), 1 << 22);

				/* ODT Strength */
				switch (mrc_params->rd_odt_value) {
				case 1:
					temp = 0x3;
					break;	/* 60 ohm */
				case 2:
					temp = 0x3;
					break;	/* 120 ohm */
				case 3:
					temp = 0x3;
					break;	/* 180 ohm */
				default:
					temp = 0x3;
					break;	/* 120 ohm */
				}
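
				/*
				 * Editor's note: every case above currently
				 * resolves to the same code (0x3), so the
				 * switch is effectively a placeholder for
				 * per-setting ODT strengths.
				 */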

				/* ODT strength */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp << 5, 0x60);
				/* ODT strength */
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp << 5, 0x60);

				/* Dynamic ODT/DIFFAMP */
				temp = (cas << 24) | (cas << 16) |
					(cas << 8) | (cas << 0);
				switch (speed) {
				case 0:
					temp -= 0x01010101;
					break;	/* 800 */
				case 1:
					temp -= 0x02020202;
					break;	/* 1066 */
				case 2:
					temp -= 0x03030303;
					break;	/* 1333 */
				case 3:
					temp -= 0x04040404;
					break;	/* 1600 */
				}
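
				/*
				 * Editor's note: temp now holds the value
				 * CL - (speed index + 1) replicated in each
				 * of its four bytes, one launch-time field
				 * per byte for the write below.
				 */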

				/* Launch Time: ODT, DIFFAMP, ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B01LATCTL1 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x1f1f1f1f);
				switch (speed) {
				/* HSD#234715 */
				case 0:
					temp = (0x06 << 16) | (0x07 << 8);
					break;	/* 800 */
				case 1:
					temp = (0x07 << 16) | (0x08 << 8);
					break;	/* 1066 */
				case 2:
					temp = (0x09 << 16) | (0x0a << 8);
					break;	/* 1333 */
				case 3:
					temp = (0x0a << 16) | (0x0b << 8);
					break;	/* 1600 */
				}

				/* On Duration: ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B0ONDURCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003f3f00);
				/* On Duration: ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B1ONDURCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003f3f00);

				switch (mrc_params->rd_odt_value) {
				case 0:
					/* override DIFFAMP=on, ODT=off */
					temp = (0x3f << 16) | (0x3f << 10);
					break;
				default:
					/* override DIFFAMP=on, ODT=on */
					temp = (0x3f << 16) | (0x2a << 10);
					break;
				}

				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003ffc00);
				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B1OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003ffc00);

				/* DLL Setup */

				/* 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO) */
				mrc_alt_write_mask(DDRPHY,
					B0LATCTL0 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					((cas + 7) << 16) | ((cas - 4) << 8) |
					((cwl - 2) << 0), 0x003f1f1f);
				mrc_alt_write_mask(DDRPHY,
					B1LATCTL0 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					((cas + 7) << 16) | ((cas - 4) << 8) |
					((cwl - 2) << 0), 0x003f1f1f);

				/* RCVEN Bypass (PO) */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x81);
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x81);

				/* TX */
				mrc_alt_write_mask(DDRPHY,
					DQCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 16, 1 << 16);
				mrc_alt_write_mask(DDRPHY,
					B01PTRCTL1 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 8, 1 << 8);

				/* RX (PO) */
				/* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
				mrc_alt_write_mask(DDRPHY,
					B0VREFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					(0x03 << 2) | (0x0 << 1) | (0x0 << 0),
					0xff);
				/* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
				mrc_alt_write_mask(DDRPHY,
					B1VREFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					(0x03 << 2) | (0x0 << 1) | (0x0 << 0),
					0xff);
				/* Per-Bit De-Skew Enable */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x10);
				/* Per-Bit De-Skew Enable */
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x10);
			}

			/* CLKEBB */
			mrc_alt_write_mask(DDRPHY,
				CMDOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
				0, 1 << 23);

			/* Enable tristate control of cmd/address bus */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				0, 0x03);

			/* ODT RCOMP */
			mrc_alt_write_mask(DDRPHY,
				CMDRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
				(0x03 << 5) | (0x03 << 0), 0x3ff);

			/* CMDPM* registers must be programmed in this order */

			/* Turn On Delays: SFR (regulator), MPLL */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG4 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/*
			 * Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3,
			 * VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT
			 * for_PM_MSG_gt0, MDLL Turn On
			 */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG3 + ch * DDRIOCCC_CH_OFFSET,
				0xfffff616, 0xffffffff);
			/* MPLL Divider Reset Delays */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG2 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Turn Off Delays: VREG, Staggered MDLL, MDLL, PI */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG1 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG0 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Allow PUnit signals */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				(0x6 << 8) | (0x1 << 6) | (0x4 << 0),
				0xffe00f4f);
			/* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				(0x3 << 4) | (0x7 << 0), 0x7f);

			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
				0, 1 << 24);	/* CLKEBB */
			/* Buffer Enable: CS,CKE,ODT,CLK */
			mrc_alt_write_mask(DDRPHY,
				CCCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				0x1f, 0x000ffff1);
			/* ODT RCOMP */
			mrc_alt_write_mask(DDRPHY,
				CCRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
				(0x03 << 8) | (0x03 << 0), 0x00001f1f);
			/* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				(0x3 << 4) | (0x7 << 0), 0x7f);

			/*
			 * COMP (RON channel specific)
			 * - DQ/DQS/DM RON: 32 Ohm
			 * - CTRL/CMD RON: 27 Ohm
			 * - CLK RON: 26 Ohm
			 */
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x08 << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CMDVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0c << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0f << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x08 << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CTLVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0c << 24) | (0x03 << 16), 0x3f3f0000);

			/* DQS Swapped Input Enable */
			mrc_alt_write_mask(DDRPHY,
				COMPEN1CH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 19) | (1 << 17), 0xc00ac000);

			/* ODT VREF = 1.5 x 274/(360+274) = 0.65V (code of ~50) */
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x32 << 8) | (0x03 << 0), 0x00003f3f);
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x32 << 8) | (0x03 << 0), 0x00003f3f);
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0e << 8) | (0x05 << 0), 0x00003f3f);

			/*
			 * Slew rate settings are frequency specific,
			 * numbers below are for 800MHz (speed == 0)
			 * - DQ/DQS/DM/CLK SR: 4V/ns,
			 * - CTRL/CMD SR: 1.5V/ns
			 */
			temp = (0x0e << 16) | (0x0e << 12) | (0x08 << 8) |
				(0x0b << 4) | (0x0b << 0);
			/* DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ */
			mrc_alt_write_mask(DDRPHY,
				DLYSELCH0 + ch * DDRCOMP_CH_OFFSET,
				temp, 0x000fffff);
			/* TCO Vref CLK,DQS,DQ */
			mrc_alt_write_mask(DDRPHY,
				TCOVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x05 << 16) | (0x05 << 8) | (0x05 << 0),
				0x003f3f3f);
			/* ODTCOMP CMD/CTL PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CCBUFODTCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x03 << 8) | (0x03 << 0),
				0x00001f1f);
			/* COMP */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				0, 0xc0000100);

#ifdef BACKUP_COMPS
			/* DQ COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* DQS COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* CLK COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0c << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0c << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x07 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x07 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* CMD COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CMDDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CMDDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CMDDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CMDDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);

			/* CTL COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CTLDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CTLDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CTLDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CTLDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
#else
			/* DQ TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);

			/* DQS TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);

			/* CLK TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
#endif

			/* program STATIC delays */
#ifdef BACKUP_WCMD
			set_wcmd(ch, ddr_wcmd[PLATFORM_ID]);
#else
			set_wcmd(ch, ddr_wclk[PLATFORM_ID] + HALF_CLK);
#endif

			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					set_wclk(ch, rk, ddr_wclk[PLATFORM_ID]);
#ifdef BACKUP_WCTL
					set_wctl(ch, rk, ddr_wctl[PLATFORM_ID]);
#else
					set_wctl(ch, rk, ddr_wclk[PLATFORM_ID] + HALF_CLK);
#endif
				}
			}
		}
	}

	/* COMP (non channel specific) */
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADRVPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANAODTPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANAODTPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANAODTPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADLYPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCOCOMP: Pulse Count */
	mrc_alt_write_mask(DDRPHY, TCOCNTCTRL, 1, 3);
	/* ODT: CMD/CTL PD/PU */
	mrc_alt_write_mask(DDRPHY, CHNLBUFSTATIC,
		(0x03 << 24) | (0x03 << 16), 0x1f1f0000);
	/* Set 1us counter */
	mrc_alt_write_mask(DDRPHY, MSCNTR, 0x64, 0xff);
	mrc_alt_write_mask(DDRPHY, LATCH1CTL, 0x1 << 28, 0x70000000);

	/* Release PHY from reset */
	mrc_alt_write_mask(DDRPHY, MASTERRSTN, 1, 1);

	/* STEP1 */
	mrc_post_code(0x03, 0x11);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				mrc_alt_write_mask(DDRPHY,
					DQMDLLCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 13,
					1 << 13);	/* Enable VREG */
				delay_n(3);
			}

			/* ECC */
			mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
			/* CMD */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
		}
	}

	/* STEP2 */
	mrc_post_code(0x03, 0x12);
	delay_n(200);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				mrc_alt_write_mask(DDRPHY,
					DQMDLLCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 17,
					1 << 17);	/* Enable MCDLL */
				delay_n(50);
			}

			/* ECC */
			mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
				1 << 17, 1 << 17);	/* Enable MCDLL */
			delay_n(50);
			/* CMD */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 18, 1 << 18);	/* Enable MCDLL */
			delay_n(50);
			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 18, 1 << 18);	/* Enable MCDLL */
			delay_n(50);
		}
	}

	/* STEP3 */
	mrc_post_code(0x03, 0x13);
	delay_n(100);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
#ifdef FORCE_16BIT_DDRIO
				temp = (bl_grp &&
					(mrc_params->channel_width == X16)) ?
					0x11ff : 0xffff;
#else
				temp = 0xffff;
#endif
				/* Enable TXDLL */
				mrc_alt_write_mask(DDRPHY,
					DQDLLTXCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0xffff);
				delay_n(3);
				/* Enable RXDLL */
				mrc_alt_write_mask(DDRPHY,
					DQDLLRXCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0xf, 0xf);
				delay_n(3);
				/* Enable RXDLL Overrides BL0 */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0xf, 0xf);
			}

			/* ECC */
			temp = 0xffff;
			mrc_alt_write_mask(DDRPHY, ECCDLLTXCTL,
				temp, 0xffff);
			delay_n(3);

			/* CMD (PO) */
			mrc_alt_write_mask(DDRPHY,
				CMDDLLTXCTL + ch * DDRIOCCC_CH_OFFSET,
				temp, 0xffff);
			delay_n(3);
		}
	}

	/* STEP4 */
	mrc_post_code(0x03, 0x14);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* Host To Memory Clock Alignment (HMC) for 800/1066 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				/* CLK_ALIGN_MOD_ID */
				mrc_alt_write_mask(DDRPHY,
					DQCLKALIGNREG2 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					bl_grp ? 3 : 1,
					0xf);
			}

			mrc_alt_write_mask(DDRPHY,
				ECCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x2, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x0, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x2, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
				0x20, 0x30);
			/*
			 * NUM_SAMPLES, MAX_SAMPLES,
			 * MACRO_PI_STEP, MICRO_PI_STEP
			 */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG1 + ch * DDRIOCCC_CH_OFFSET,
				(0x18 << 16) | (0x10 << 8) |
				(0x8 << 2) | (0x1 << 0),
				0x007f7fff);
			/* TOTAL_NUM_MODULES, FIRST_U_PARTITION */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG2 + ch * DDRIOCCC_CH_OFFSET,
				(0x10 << 16) | (0x4 << 8) | (0x2 << 4),
				0x001f0ff0);
#ifdef HMC_TEST
			/* START_CLK_ALIGN=1 */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 24, 1 << 24);
			while (msg_port_alt_read(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET) &
				(1 << 24))
				;	/* wait for START_CLK_ALIGN=0 */
#endif

			/* Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN */
			mrc_alt_write_mask(DDRPHY,
				CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
				1, 1);	/* WRPTRENABLE=1 */

			/* COMP initial */
			/* enable bypass for CLK buffer (PO) */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 5, 1 << 5);
			/* Initial COMP Enable */
			mrc_alt_write_mask(DDRPHY, CMPCTRL, 1, 1);
			/* wait for Initial COMP Enable = 0 */
			while (msg_port_alt_read(DDRPHY, CMPCTRL) & 1)
				;
			/* disable bypass for CLK buffer (PO) */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				~(1 << 5), 1 << 5);

			/* IOBUFACT */

			/* STEP4a */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 2, 1 << 2);	/* IOBUFACTRST_N=1 */

			/* DDRPHY initialization complete */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 20, 1 << 20);	/* SPID_INIT_COMPLETE=1 */
		}
	}

	LEAVEFN();
}

/* This function performs JEDEC initialization on all enabled channels */
void perform_jedec_init(struct mrc_params *mrc_params)
{
	uint8_t twr, wl, rank;
	uint32_t tck;
	u32 dtr0;
	u32 drp;
	u32 drmc;
	u32 mrs0_cmd = 0;
	u32 emrs1_cmd = 0;
	u32 emrs2_cmd = 0;
	u32 emrs3_cmd = 0;

	ENTERFN();

	/* jedec_init starts */
	mrc_post_code(0x04, 0x00);

	/* DDR3_RESET_SET=0, DDR3_RESET_RESET=1 */
	mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 2, 0x102);

	/* Assert RESET# for 200us */
	delay_u(200);

	/* DDR3_RESET_SET=1, DDR3_RESET_RESET=0 */
	mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 0x100, 0x102);

	dtr0 = msg_port_read(MEM_CTLR, DTR0);

	/*
	 * Set CKEVAL for populated ranks
	 * then send NOP to each rank (#4550197)
	 */

	drp = msg_port_read(MEM_CTLR, DRP);
	drp &= 0x3;

	drmc = msg_port_read(MEM_CTLR, DRMC);
	drmc &= 0xfffffffc;
	drmc |= (DRMC_CKEMODE | drp);

	msg_port_write(MEM_CTLR, DRMC, drmc);

	for (rank = 0; rank < NUM_RANKS; rank++) {
		/* Skip to next populated rank */
		if ((mrc_params->rank_enables & (1 << rank)) == 0)
			continue;

		dram_init_command(DCMD_NOP(rank));
	}

	msg_port_write(MEM_CTLR, DRMC,
		(mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0));

	/*
	 * setup for emrs 2
	 * BIT[15:11] --> Always "0"
	 * BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
	 * BIT[08]    --> Always "0"
	 * BIT[07]    --> SRT: use sr_temp_range
	 * BIT[06]    --> ASR: want "Manual SR Reference" (0)
	 * BIT[05:03] --> CWL: use oem_tCWL
	 * BIT[02:00] --> PASR: want "Full Array" (0)
	 */
	emrs2_cmd |= (2 << 3);
	wl = 5 + mrc_params->ddr_speed;
	emrs2_cmd |= ((wl - 5) << 9);
	emrs2_cmd |= (mrc_params->sr_temp_range << 13);

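	/*
	 * Editor's note (an inference from the shifts used here and from
	 * "mrs1 = emrs1_cmd >> 6" below, not from the original source):
	 * the Dunit command word appears to carry the MRS register select
	 * in its low bits (the "N << 3" terms) and the MRS payload
	 * starting at bit 6, so (wl - 5) << 9 lands in MR2 CWL bits [5:3]
	 * and sr_temp_range << 13 in MR2 bit 7 (SRT).
	 */
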
	/*
	 * setup for emrs 3
	 * BIT[15:03] --> Always "0"
	 * BIT[02]    --> MPR: want "Normal Operation" (0)
	 * BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
	 */
	emrs3_cmd |= (3 << 3);

	/*
	 * setup for emrs 1
	 * BIT[15:13]     --> Always "0"
	 * BIT[12:12]     --> Qoff: want "Output Buffer Enabled" (0)
	 * BIT[11:11]     --> TDQS: want "Disabled" (0)
	 * BIT[10:10]     --> Always "0"
	 * BIT[09,06,02]  --> Rtt_nom: use rtt_nom_value
	 * BIT[08]        --> Always "0"
	 * BIT[07]        --> WR_LVL: want "Disabled" (0)
	 * BIT[05,01]     --> DIC: use ron_value
	 * BIT[04:03]     --> AL: additive latency want "0" (0)
	 * BIT[00]        --> DLL: want "Enable" (0)
	 *
	 * (BIT5|BIT1) set Ron value
	 * 00 --> RZQ/6 (40ohm)
	 * 01 --> RZQ/7 (34ohm)
	 * 1* --> RESERVED
	 *
	 * (BIT9|BIT6|BIT2) set Rtt_nom value
	 * 000 --> Disabled
	 * 001 --> RZQ/4 ( 60ohm)
	 * 010 --> RZQ/2 (120ohm)
	 * 011 --> RZQ/6 ( 40ohm)
	 * 1** --> RESERVED
	 */
	emrs1_cmd |= (1 << 3);
	emrs1_cmd &= ~(1 << 6);

	if (mrc_params->ron_value == 0)
		emrs1_cmd |= (1 << 7);
	else
		emrs1_cmd &= ~(1 << 7);

	if (mrc_params->rtt_nom_value == 0)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_40 << 6);
	else if (mrc_params->rtt_nom_value == 1)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_60 << 6);
	else if (mrc_params->rtt_nom_value == 2)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_120 << 6);

	/* save MRS1 value (excluding control fields) */
	mrc_params->mrs1 = emrs1_cmd >> 6;

	/*
	 * setup for mrs 0
	 * BIT[15:13]     --> Always "0"
	 * BIT[12]        --> PPD: for Quark (1)
	 * BIT[11:09]     --> WR: use oem_tWR
	 * BIT[08]        --> DLL: want "Reset" (1, self clearing)
	 * BIT[07]        --> MODE: want "Normal" (0)
	 * BIT[06:04,02]  --> CL: use oem_tCAS
	 * BIT[03]        --> RD_BURST_TYPE: want "Interleave" (1)
	 * BIT[01:00]     --> BL: want "8 Fixed" (0)
	 * WR:
	 * 0 --> 16
	 * 1 --> 5
	 * 2 --> 6
	 * 3 --> 7
	 * 4 --> 8
	 * 5 --> 10
	 * 6 --> 12
	 * 7 --> 14
	 * CL:
	 * BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
	 * BIT[06:04] use oem_tCAS-4
	 */
	mrs0_cmd |= (1 << 14);
	mrs0_cmd |= (1 << 18);
	mrs0_cmd |= ((((dtr0 >> 12) & 7) + 1) << 10);

	tck = t_ck[mrc_params->ddr_speed];
	/* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
	twr = MCEIL(15000, tck);
	mrs0_cmd |= ((twr - 4) << 15);

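	/*
	 * Editor's note (worked example, not from the original source):
	 * at 800 MT/s, tck = 2500 ps, so twr = MCEIL(15000, 2500) = 6
	 * clocks and the field above gets twr - 4 = 2, which the WR
	 * table in the comment maps back to 6 clocks.
	 */
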
	for (rank = 0; rank < NUM_RANKS; rank++) {
		/* Skip to next populated rank */
		if ((mrc_params->rank_enables & (1 << rank)) == 0)
			continue;

		emrs2_cmd |= (rank << 22);
		dram_init_command(emrs2_cmd);

		emrs3_cmd |= (rank << 22);
		dram_init_command(emrs3_cmd);

		emrs1_cmd |= (rank << 22);
		dram_init_command(emrs1_cmd);

		mrs0_cmd |= (rank << 22);
		dram_init_command(mrs0_cmd);

		dram_init_command(DCMD_ZQCL(rank));
	}

	LEAVEFN();
}

/*
 * Dunit Initialization Complete
 *
 * Indicates that initialization of the Dunit has completed.
 *
 * Memory accesses are permitted and maintenance operation begins.
 * Until this bit is set to a 1, the memory controller will not accept
 * DRAM requests from the MEMORY_MANAGER or HTE.
 */
void set_ddr_init_complete(struct mrc_params *mrc_params)
{
	u32 dco;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~DCO_PMICTL;
	dco |= DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}

/*
 * This function restores timing data saved on a previous boot
 *
 * Reusing the saved data speeds up boot times and is required for
 * Suspend To RAM capabilities.
 */
void restore_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;
	const struct mrc_timings *mt = &mrc_params->timings;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
				set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
				set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
				set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
				if (rk == 0) {
					/* VREF (RANK0 only) */
					set_vref(ch, bl, mt->vref[ch][bl]);
				}
			}
			set_wctl(ch, rk, mt->wctl[ch][rk]);
		}
		set_wcmd(ch, mt->wcmd[ch]);
	}
}

/*
 * Configure default settings normally set as part of read training
 *
 * Some defaults have to be set earlier as they may affect earlier
 * training steps.
 */
void default_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				set_rdqs(ch, rk, bl, 24);
				if (rk == 0) {
					/* VREF (RANK0 only) */
					set_vref(ch, bl, 32);
				}
			}
		}
	}
}

/*
 * This function will perform our RCVEN Calibration Algorithm.
 * We will only use the 2xCLK domain timings to perform RCVEN Calibration.
 * All byte lanes will be calibrated "simultaneously" per channel per rank.
 */
void rcvn_cal(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;

#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
#ifndef BACKUP_RCVN
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

#ifndef BACKUP_RCVN
	uint32_t temp;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	u32 dtr1, dtr1_save;
#endif

	ENTERFN();

	/* rcvn_cal starts */
	mrc_post_code(0x05, 0x00);

#ifndef BACKUP_RCVN
	/* need separate burst to sample DQS preamble */
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	dtr1_save = dtr1;
	dtr1 |= DTR1_TCCD_12CLK;
	msg_port_write(MEM_CTLR, DTR1, dtr1);
#endif

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform RCVEN Calibration on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * channel and rank being calibrated
					 */
					mrc_post_code(0x05, 0x10 + ((ch << 4) | rk));

#ifdef BACKUP_RCVN
					/* set hard-coded timing values */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++)
						set_rcvn(ch, rk, bl, ddr_rcvn[PLATFORM_ID]);
#else
					/* enable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							B01PTRCTL1 +
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
							0, 1 << 8);
					}
					/* initialize the starting delay to 128 PI (cas + 1 CLK) */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* 1x CLK domain timing is cas-4 */
						delay[bl] = (4 + 1) * FULL_CLK;

						set_rcvn(ch, rk, bl, delay[bl]);
					}

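					/*
					 * Editor's note (based on the inline
					 * comments above, not on new data):
					 * FULL_CLK is 128 PI ticks and
					 * QRTR_CLK is 32; the 1x CLK domain
					 * already provides cas - 4 clocks, so
					 * the 2x delay of (4 + 1) * FULL_CLK
					 * places the initial sample point at
					 * cas + 1 clocks.
					 */
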
					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, true);

					/* Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}
					/* Now decrement delay by 128 PI (1 CLK) until we sample a "0" */
					do {
						temp = sample_dqs(mrc_params, ch, rk, true);
						for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
							if (temp & (1 << bl)) {
								if (delay[bl] >= FULL_CLK) {
									delay[bl] -= FULL_CLK;
									set_rcvn(ch, rk, bl, delay[bl]);
								} else {
									/* not enough delay */
									training_message(ch, rk, bl);
									mrc_post_code(0xee, 0x50);
								}
							}
						}
					} while (temp & 0xff);

#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
					/* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						/* add "delay[]" values to "final_delay[][]" for rolling average */
						final_delay[ch][bl] += delay[bl];
						/* set timing based on rolling average values */
						set_rcvn(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
					}
#else
					/* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}
#endif

					/* disable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							B01PTRCTL1 +
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
							1 << 8, 1 << 8);
					}
#endif
				}
			}
		}
	}

#ifndef BACKUP_RCVN
	/* restore original */
	msg_port_write(MEM_CTLR, DTR1, dtr1_save);
#endif

	LEAVEFN();
}

/*
 * This function will perform the Write Levelling algorithm
 * (align WCLK and WDQS).
 *
 * This algorithm will act on each rank in each channel separately.
 */
void wr_level(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;

#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
#ifndef BACKUP_WDQS
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

#ifndef BACKUP_WDQS
	/* determines stop condition for CRS_WR_LVL */
	bool all_edges_found;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	/*
	 * static makes it so the data is loaded in the heap once by shadow(),
	 * where non-static copies the data onto the stack every time this
	 * function is called
	 */
	uint32_t address;	/* address to be checked during COARSE_WR_LVL */
	u32 dtr4, dtr4_save;
#endif

	ENTERFN();

	/* wr_level starts */
	mrc_post_code(0x06, 0x00);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform WRITE LEVELING algorithm on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * rank and channel being calibrated
					 */
					mrc_post_code(0x06, 0x10 + ((ch << 4) | rk));

#ifdef BACKUP_WDQS
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						set_wdqs(ch, rk, bl, ddr_wdqs[PLATFORM_ID]);
						set_wdq(ch, rk, bl, ddr_wdqs[PLATFORM_ID] - QRTR_CLK);
					}
#else
					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					/*
					 * enable Write Levelling Mode
					 * (EMRS1 w/ Write Levelling Mode Enable)
					 */
					dram_init_command(DCMD_MRS1(rk, 0x82));

					/*
					 * set ODT DRAM Full Time Termination
					 * disable in MCU
					 */
					dtr4 = msg_port_read(MEM_CTLR, DTR4);
					dtr4_save = dtr4;
					dtr4 |= DTR4_ODTDIS;
					msg_port_write(MEM_CTLR, DTR4, dtr4);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/*
						 * Enable Sandy Bridge Mode (WDQ Tri-State) &
						 * Ensure 5 WDQS pulses during Write Leveling
						 */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
							0x10000154,
							0x100003fc);
					}

					/* Write Leveling Mode enabled in IO */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
						1 << 16, 1 << 16);

					/* Initialize the starting delay to WCLK */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/*
						 * CLK0 --> RK0
						 * CLK1 --> RK1
						 */
						delay[bl] = get_wclk(ch, rk);

						set_wdqs(ch, rk, bl, delay[bl]);
					}

					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, false);

					/* disable Write Levelling Mode */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
						0, 1 << 16);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/* Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
							0x00000154,
							0x100003fc);
					}

					/* restore original DTR4 */
					msg_port_write(MEM_CTLR, DTR4, dtr4_save);

					/*
					 * restore original value
					 * (Write Levelling Mode Disable)
					 */
					dram_init_command(DCMD_MRS1(rk, mrc_params->mrs1));

					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					mrc_post_code(0x06, 0x30 + ((ch << 4) | rk));

					/*
					 * COARSE WRITE LEVEL:
					 * check that we're on the correct clock edge
					 */

					/* hte reconfiguration request */
					mrc_params->hte_setup = 1;

					/* start CRS_WR_LVL with WDQS = WDQS + 128 PI */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] = get_wdqs(ch, rk, bl) + FULL_CLK;
						set_wdqs(ch, rk, bl, delay[bl]);
						/*
						 * program WDQ timings based on WDQS
						 * (WDQ = WDQS - 32 PI)
						 */
						set_wdq(ch, rk, bl, (delay[bl] - QRTR_CLK));
					}

					/* get an address in the targeted channel/rank */
					address = get_addr(ch, rk);
					do {
						uint32_t coarse_result = 0x00;
						uint32_t coarse_result_mask = byte_lane_mask(mrc_params);
						/* assume pass */
						all_edges_found = true;

						mrc_params->hte_setup = 1;
						coarse_result = check_rw_coarse(mrc_params, address);

						/* check for failures and margin the byte lane back 128 PI (1 CLK) */
						for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
							if (coarse_result & (coarse_result_mask << bl)) {
								all_edges_found = false;
								delay[bl] -= FULL_CLK;
								set_wdqs(ch, rk, bl, delay[bl]);
								/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
								set_wdq(ch, rk, bl, delay[bl] - QRTR_CLK);
							}
						}
					} while (!all_edges_found);
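
					/*
					 * Editor's note: the loop above walks
					 * each failing byte lane back one
					 * full clock (128 PI) at a time until
					 * check_rw_coarse() passes on every
					 * lane, i.e. until WDQS sits on the
					 * correct clock edge relative to the
					 * rising edge found earlier.
					 */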

#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
					/* accumulate "final_delay[][]" values from "delay[]" values for rolling average */
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						final_delay[ch][bl] += delay[bl];
						set_wdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
						/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
						set_wdq(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled - QRTR_CLK);
					}
#endif
#endif
				}
			}
		}
	}

	LEAVEFN();
}

void prog_page_ctrl(struct mrc_params *mrc_params)
{
	u32 dpmc0;

	ENTERFN();

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 |= (4 << 16);
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	LEAVEFN();
}
1770 
1771 /*
1772  * This function will perform the READ TRAINING Algorithm on all
1773  * channels/ranks/byte_lanes simultaneously to minimize execution time.
1774  *
1775  * The idea here is to train the VREF and RDQS (and eventually RDQ) values
1776  * to achieve maximum READ margins. The algorithm will first determine the
1777  * X coordinate (RDQS setting). This is done by collapsing the VREF eye
1778  * until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
1779  * Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX,
1780  * then average those; this will be the final X coordinate. The algorithm
1781  * will then determine the Y coordinate (VREF setting). This is done by
1782  * collapsing the RDQS eye until we find a minimum required VREF eye for
1783  * RDQS_MIN and RDQS_MAX. Then we take the averages of the VREF eye at
1784  * RDQS_MIN and RDQS_MAX, then average those; this will be the final Y
1785  * coordinate.
1786  *
1787  * NOTE: this algorithm assumes the eye curves have a one-to-one relationship,
1788  * meaning for each X the curve has only one Y and vice versa.
1789  */
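/*
 * Worked example of the two-stage centering math (the numbers are
 * illustrative only): if the passing RDQS edges come back as L/T=12,
 * R/T=44, L/B=16 and R/B=40, the X (RDQS) center is
 * ((12 + 44) / 2 + (16 + 40) / 2) / 2 = 28; the Y (VREF) center is
 * computed the same way from the four VREF edges.
 */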
1790 void rd_train(struct mrc_params *mrc_params)
1791 {
1792 	uint8_t ch;	/* channel counter */
1793 	uint8_t rk;	/* rank counter */
1794 	uint8_t bl;	/* byte lane counter */
1795 	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
1796 #ifndef BACKUP_RDQS
1798 	uint8_t side_x;	/* tracks LEFT/RIGHT approach vectors */
1799 	uint8_t side_y;	/* tracks BOTTOM/TOP approach vectors */
1800 	/* X coordinate data (passing RDQS values) for approach vectors */
1801 	uint8_t x_coordinate[2][2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
1802 	/* Y coordinate data (passing VREF values) for approach vectors */
1803 	uint8_t y_coordinate[2][2][NUM_CHANNELS][NUM_BYTE_LANES];
1804 	/* centered X (RDQS) */
1805 	uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
1806 	/* centered Y (VREF) */
1807 	uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES];
1808 	uint32_t address;	/* target address for check_bls_ex() */
1809 	uint32_t result;	/* result of check_bls_ex() */
1810 	uint32_t bl_mask;	/* byte lane mask for result checking */
1811 #ifdef R2R_SHARING
1812 	/* used to find placement for rank2rank sharing configs */
1813 	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
1814 	/* used to find placement for rank2rank sharing configs */
1815 	uint32_t num_ranks_enabled = 0;
1816 #endif
1817 #endif
1818 
1819 	/* rd_train starts */
1820 	mrc_post_code(0x07, 0x00);
1821 
1822 	ENTERFN();
1823 
1824 #ifdef BACKUP_RDQS
1825 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
1826 		if (mrc_params->channel_enables & (1 << ch)) {
1827 			for (rk = 0; rk < NUM_RANKS; rk++) {
1828 				if (mrc_params->rank_enables & (1 << rk)) {
1829 					for (bl = 0;
1830 					     bl < NUM_BYTE_LANES / bl_divisor;
1831 					     bl++) {
1832 						set_rdqs(ch, rk, bl, ddr_rdqs[PLATFORM_ID]);
1833 					}
1834 				}
1835 			}
1836 		}
1837 	}
1838 #else
1839 	/* initialize x/y_coordinate arrays */
1840 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
1841 		if (mrc_params->channel_enables & (1 << ch)) {
1842 			for (rk = 0; rk < NUM_RANKS; rk++) {
1843 				if (mrc_params->rank_enables & (1 << rk)) {
1844 					for (bl = 0;
1845 					     bl < NUM_BYTE_LANES / bl_divisor;
1846 					     bl++) {
1847 						/* x_coordinate */
1848 						x_coordinate[L][B][ch][rk][bl] = RDQS_MIN;
1849 						x_coordinate[R][B][ch][rk][bl] = RDQS_MAX;
1850 						x_coordinate[L][T][ch][rk][bl] = RDQS_MIN;
1851 						x_coordinate[R][T][ch][rk][bl] = RDQS_MAX;
1852 						/* y_coordinate */
1853 						y_coordinate[L][B][ch][bl] = VREF_MIN;
1854 						y_coordinate[R][B][ch][bl] = VREF_MIN;
1855 						y_coordinate[L][T][ch][bl] = VREF_MAX;
1856 						y_coordinate[R][T][ch][bl] = VREF_MAX;
1857 					}
1858 				}
1859 			}
1860 		}
1861 	}
1862 
1863 	/* initialize other variables */
1864 	bl_mask = byte_lane_mask(mrc_params);
1865 	address = get_addr(0, 0);
1866 
1867 #ifdef R2R_SHARING
1868 	/* need to set "final_delay[][]" elements to "0" */
1869 	memset(final_delay, 0, sizeof(final_delay));
1870 #endif
1871 
1872 	/* look for passing coordinates */
1873 	for (side_y = B; side_y <= T; side_y++) {
1874 		for (side_x = L; side_x <= R; side_x++) {
1875 			mrc_post_code(0x07, 0x10 + side_y * 2 + side_x);
1876 
1877 			/* find passing values */
1878 			for (ch = 0; ch < NUM_CHANNELS; ch++) {
1879 				if (mrc_params->channel_enables & (0x1 << ch)) {
1880 					for (rk = 0; rk < NUM_RANKS; rk++) {
1881 						if (mrc_params->rank_enables &
1882 							(0x1 << rk)) {
1883 							/* set x/y_coordinate search starting settings */
1884 							for (bl = 0;
1885 							     bl < NUM_BYTE_LANES / bl_divisor;
1886 							     bl++) {
1887 								set_rdqs(ch, rk, bl,
1888 									 x_coordinate[side_x][side_y][ch][rk][bl]);
1889 								set_vref(ch, bl,
1890 									 y_coordinate[side_x][side_y][ch][bl]);
1891 							}
1892 
1893 							/* get an address in the target channel/rank */
1894 							address = get_addr(ch, rk);
1895 
1896 							/* request HTE reconfiguration */
1897 							mrc_params->hte_setup = 1;
1898 
1899 							/* test the settings */
1900 							do {
1901 								/* result[07:00] == failing byte lane (MAX 8) */
1902 								result = check_bls_ex(mrc_params, address);
1903 
1904 								/* check for failures */
1905 								if (result & 0xff) {
1906 									/* at least 1 byte lane failed */
1907 									for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
1908 										if (result &
1909 											(bl_mask << bl)) {
1910 											/* adjust the RDQS values accordingly */
1911 											if (side_x == L)
1912 												x_coordinate[L][side_y][ch][rk][bl] += RDQS_STEP;
1913 											else
1914 												x_coordinate[R][side_y][ch][rk][bl] -= RDQS_STEP;
1915 
1916 											/* check that we haven't closed the RDQS_EYE too much */
1917 											if ((x_coordinate[L][side_y][ch][rk][bl] > (RDQS_MAX - MIN_RDQS_EYE)) ||
1918 												(x_coordinate[R][side_y][ch][rk][bl] < (RDQS_MIN + MIN_RDQS_EYE)) ||
1919 												(x_coordinate[L][side_y][ch][rk][bl] ==
1920 												x_coordinate[R][side_y][ch][rk][bl])) {
1921 												/*
1922 												 * not enough RDQS margin available at this VREF
1923 												 * update VREF values accordingly
1924 												 */
1925 												if (side_y == B)
1926 													y_coordinate[side_x][B][ch][bl] += VREF_STEP;
1927 												else
1928 													y_coordinate[side_x][T][ch][bl] -= VREF_STEP;
1929 
1930 												/* check that we haven't closed the VREF_EYE too much */
1931 												if ((y_coordinate[side_x][B][ch][bl] > (VREF_MAX - MIN_VREF_EYE)) ||
1932 													(y_coordinate[side_x][T][ch][bl] < (VREF_MIN + MIN_VREF_EYE)) ||
1933 													(y_coordinate[side_x][B][ch][bl] == y_coordinate[side_x][T][ch][bl])) {
1934 													/* VREF_EYE collapsed below MIN_VREF_EYE */
1935 													training_message(ch, rk, bl);
1936 													mrc_post_code(0xee, 0x70 + side_y * 2 + side_x);
1937 												} else {
1938 													/* update the VREF setting */
1939 													set_vref(ch, bl, y_coordinate[side_x][side_y][ch][bl]);
1940 													/* reset the X coordinate to begin the search at the new VREF */
1941 													x_coordinate[side_x][side_y][ch][rk][bl] =
1942 														(side_x == L) ? RDQS_MIN : RDQS_MAX;
1943 												}
1944 											}
1945 
1946 											/* update the RDQS setting */
1947 											set_rdqs(ch, rk, bl, x_coordinate[side_x][side_y][ch][rk][bl]);
1948 										}
1949 									}
1950 								}
1951 							} while (result & 0xff);
1952 						}
1953 					}
1954 				}
1955 			}
1956 		}
1957 	}
1958 
1959 	mrc_post_code(0x07, 0x20);
1960 
1961 	/* find final RDQS (X coordinate) & final VREF (Y coordinate) */
1962 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
1963 		if (mrc_params->channel_enables & (1 << ch)) {
1964 			for (rk = 0; rk < NUM_RANKS; rk++) {
1965 				if (mrc_params->rank_enables & (1 << rk)) {
1966 					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
1967 						uint32_t temp1;
1968 						uint32_t temp2;
1969 
1970 						/* x_coordinate */
1971 						DPF(D_INFO,
1972 						    "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n",
1973 						    rk, bl,
1974 						    x_coordinate[L][T][ch][rk][bl],
1975 						    x_coordinate[R][T][ch][rk][bl],
1976 						    x_coordinate[L][B][ch][rk][bl],
1977 						    x_coordinate[R][B][ch][rk][bl]);
1978 
1979 						/* average the TOP side LEFT & RIGHT values */
1980 						temp1 = (x_coordinate[R][T][ch][rk][bl] + x_coordinate[L][T][ch][rk][bl]) / 2;
1981 						/* average the BOTTOM side LEFT & RIGHT values */
1982 						temp2 = (x_coordinate[R][B][ch][rk][bl] + x_coordinate[L][B][ch][rk][bl]) / 2;
1983 						/* average the above averages */
1984 						x_center[ch][rk][bl] = (uint8_t) ((temp1 + temp2) / 2);
1985 
1986 						/* y_coordinate */
1987 						DPF(D_INFO,
1988 						    "VREF R/L eye lane%d : %d-%d %d-%d\n",
1989 						    bl,
1990 						    y_coordinate[R][B][ch][bl],
1991 						    y_coordinate[R][T][ch][bl],
1992 						    y_coordinate[L][B][ch][bl],
1993 						    y_coordinate[L][T][ch][bl]);
1994 
1995 						/* average the RIGHT side TOP & BOTTOM values */
1996 						temp1 = (y_coordinate[R][T][ch][bl] + y_coordinate[R][B][ch][bl]) / 2;
1997 						/* average the LEFT side TOP & BOTTOM values */
1998 						temp2 = (y_coordinate[L][T][ch][bl] + y_coordinate[L][B][ch][bl]) / 2;
1999 						/* average the above averages */
2000 						y_center[ch][bl] = (uint8_t) ((temp1 + temp2) / 2);
2001 					}
2002 				}
2003 			}
2004 		}
2005 	}
2006 
2007 #ifdef RX_EYE_CHECK
2008 	/* perform an eye check */
2009 	for (side_y = B; side_y <= T; side_y++) {
2010 		for (side_x = L; side_x <= R; side_x++) {
2011 			mrc_post_code(0x07, 0x30 + side_y * 2 + side_x);
2012 
2013 			/* update the settings for the eye check */
2014 			for (ch = 0; ch < NUM_CHANNELS; ch++) {
2015 				if (mrc_params->channel_enables & (1 << ch)) {
2016 					for (rk = 0; rk < NUM_RANKS; rk++) {
2017 						if (mrc_params->rank_enables & (1 << rk)) {
2018 							for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
2019 								if (side_x == L)
2020 									set_rdqs(ch, rk, bl, x_center[ch][rk][bl] - (MIN_RDQS_EYE / 2));
2021 								else
2022 									set_rdqs(ch, rk, bl, x_center[ch][rk][bl] + (MIN_RDQS_EYE / 2));
2023 
2024 								if (side_y == B)
2025 									set_vref(ch, bl, y_center[ch][bl] - (MIN_VREF_EYE / 2));
2026 								else
2027 									set_vref(ch, bl, y_center[ch][bl] + (MIN_VREF_EYE / 2));
2028 							}
2029 						}
2030 					}
2031 				}
2032 			}
2033 
2034 			/* request HTE reconfiguration */
2035 			mrc_params->hte_setup = 1;
2036 
2037 			/* check the eye */
2038 			if (check_bls_ex(mrc_params, address) & 0xff) {
2039 				/* one or more byte lanes failed */
2040 				mrc_post_code(0xee, 0x74 + side_x * 2 + side_y);
2041 			}
2042 		}
2043 	}
2044 #endif
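
	/*
	 * The eye check above re-tests each lane at x_center +/-
	 * MIN_RDQS_EYE/2 and y_center +/- MIN_VREF_EYE/2; e.g. with an
	 * illustrative MIN_RDQS_EYE of 10, a lane centered at RDQS = 28
	 * would be re-tested at RDQS = 23 and 33.
	 */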
2045 
2046 	mrc_post_code(0x07, 0x40);
2047 
2048 	/* set final placements */
2049 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
2050 		if (mrc_params->channel_enables & (1 << ch)) {
2051 			for (rk = 0; rk < NUM_RANKS; rk++) {
2052 				if (mrc_params->rank_enables & (1 << rk)) {
2053 #ifdef R2R_SHARING
2054 					/* increment "num_ranks_enabled" */
2055 					num_ranks_enabled++;
2056 #endif
2057 					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
2058 						/* x_coordinate */
2059 #ifdef R2R_SHARING
2060 						final_delay[ch][bl] += x_center[ch][rk][bl];
2061 						set_rdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
2062 #else
2063 						set_rdqs(ch, rk, bl, x_center[ch][rk][bl]);
2064 #endif
2065 						/* y_coordinate */
2066 						set_vref(ch, bl, y_center[ch][bl]);
2067 					}
2068 				}
2069 			}
2070 		}
2071 	}
2072 #endif
2073 
2074 	LEAVEFN();
2075 }
2076 
2077 /*
2078  * This function will perform the WRITE TRAINING Algorithm on all
2079  * channels/ranks/byte_lanes simultaneously to minimize execution time.
2080  *
2081  * The idea here is to train the WDQ timings to achieve maximum WRITE margins.
2082  * The algorithm will start with WDQ at the current WDQ setting (tracks WDQS
2083  * in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data
2084  * patterns pass. This is because WDQS will be aligned to WCLK by the
2085  * Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window
2086  * of validity.
2087  */
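/*
 * Worked example (illustrative numbers only): with WDQS trained to
 * 96 PI, WDQ starts at 96 - 32 = 64 PI and the search window opens at
 * 64 - 32 = 32 PI (left edge) and 64 + 32 = 96 PI (right edge); each
 * failing edge is stepped inwards by WDQ_STEP until all byte lanes
 * pass, and the final WDQ is programmed to the middle of the
 * surviving window.
 */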
2088 void wr_train(struct mrc_params *mrc_params)
2089 {
2090 	uint8_t ch;	/* channel counter */
2091 	uint8_t rk;	/* rank counter */
2092 	uint8_t bl;	/* byte lane counter */
2093 	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
2094 #ifndef BACKUP_WDQ
2096 	uint8_t side;		/* LEFT/RIGHT side indicator (0=L, 1=R) */
2097 	uint32_t temp;		/* temporary DWORD */
2098 	/* 2 arrays, for L & R side passing delays */
2099 	uint32_t delay[2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
2100 	uint32_t address;	/* target address for check_bls_ex() */
2101 	uint32_t result;	/* result of check_bls_ex() */
2102 	uint32_t bl_mask;	/* byte lane mask for result checking */
2103 #ifdef R2R_SHARING
2104 	/* used to find placement for rank2rank sharing configs */
2105 	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
2106 	/* used to find placement for rank2rank sharing configs */
2107 	uint32_t num_ranks_enabled = 0;
2108 #endif
2109 #endif
2110 
2111 	/* wr_train starts */
2112 	mrc_post_code(0x08, 0x00);
2113 
2114 	ENTERFN();
2115 
2116 #ifdef BACKUP_WDQ
2117 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
2118 		if (mrc_params->channel_enables & (1 << ch)) {
2119 			for (rk = 0; rk < NUM_RANKS; rk++) {
2120 				if (mrc_params->rank_enables & (1 << rk)) {
2121 					for (bl = 0;
2122 					     bl < NUM_BYTE_LANES / bl_divisor;
2123 					     bl++) {
2124 						set_wdq(ch, rk, bl, ddr_wdq[PLATFORM_ID]);
2125 					}
2126 				}
2127 			}
2128 		}
2129 	}
2130 #else
2131 	/* initialize "delay" */
2132 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
2133 		if (mrc_params->channel_enables & (1 << ch)) {
2134 			for (rk = 0; rk < NUM_RANKS; rk++) {
2135 				if (mrc_params->rank_enables & (1 << rk)) {
2136 					for (bl = 0;
2137 					     bl < NUM_BYTE_LANES / bl_divisor;
2138 					     bl++) {
2139 						/*
2140 						 * want to start with
2141 						 * WDQ = (WDQS - QRTR_CLK)
2142 						 * +/- QRTR_CLK
2143 						 */
2144 						temp = get_wdqs(ch, rk, bl) - QRTR_CLK;
2145 						delay[L][ch][rk][bl] = temp - QRTR_CLK;
2146 						delay[R][ch][rk][bl] = temp + QRTR_CLK;
2147 					}
2148 				}
2149 			}
2150 		}
2151 	}
2152 
2153 	/* initialize other variables */
2154 	bl_mask = byte_lane_mask(mrc_params);
2155 	address = get_addr(0, 0);
2156 
2157 #ifdef R2R_SHARING
2158 	/* need to set "final_delay[][]" elements to "0" */
2159 	memset(final_delay, 0, sizeof(final_delay));
2160 #endif
2161 
2162 	/*
2163 	 * start algorithm on the LEFT side and train each channel/bl
2164 	 * until no failures are observed, then repeat for the RIGHT side.
2165 	 */
2166 	for (side = L; side <= R; side++) {
2167 		mrc_post_code(0x08, 0x10 + side);
2168 
2169 		/* set starting values */
2170 		for (ch = 0; ch < NUM_CHANNELS; ch++) {
2171 			if (mrc_params->channel_enables & (1 << ch)) {
2172 				for (rk = 0; rk < NUM_RANKS; rk++) {
2173 					if (mrc_params->rank_enables &
2174 						(1 << rk)) {
2175 						for (bl = 0;
2176 						     bl < NUM_BYTE_LANES / bl_divisor;
2177 						     bl++) {
2178 							set_wdq(ch, rk, bl, delay[side][ch][rk][bl]);
2179 						}
2180 					}
2181 				}
2182 			}
2183 		}
2184 
2185 		/* find passing values */
2186 		for (ch = 0; ch < NUM_CHANNELS; ch++) {
2187 			if (mrc_params->channel_enables & (1 << ch)) {
2188 				for (rk = 0; rk < NUM_RANKS; rk++) {
2189 					if (mrc_params->rank_enables &
2190 						(1 << rk)) {
2191 						/* get an address in the target channel/rank */
2192 						address = get_addr(ch, rk);
2193 
2194 						/* request HTE reconfiguration */
2195 						mrc_params->hte_setup = 1;
2196 
2197 						/* check the settings */
2198 						do {
2199 							/* result[07:00] == failing byte lane (MAX 8) */
2200 							result = check_bls_ex(mrc_params, address);
2201 							/* check for failures */
2202 							if (result & 0xff) {
2203 								/* at least 1 byte lane failed */
2204 								for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
2205 									if (result &
2206 										(bl_mask << bl)) {
2207 										if (side == L)
2208 											delay[L][ch][rk][bl] += WDQ_STEP;
2209 										else
2210 											delay[R][ch][rk][bl] -= WDQ_STEP;
2211 
2212 										/* check for algorithm failure */
2213 										if (delay[L][ch][rk][bl] != delay[R][ch][rk][bl]) {
2214 											/*
2215 											 * margin available
2216 											 * update delay setting
2217 											 */
2218 											set_wdq(ch, rk, bl,
2219 												delay[side][ch][rk][bl]);
2220 										} else {
2221 											/*
2222 											 * no margin available
2223 											 * notify the user and halt
2224 											 */
2225 											training_message(ch, rk, bl);
2226 											mrc_post_code(0xee, 0x80 + side);
2227 										}
2228 									}
2229 								}
2230 							}
2231 						/* stop when all byte lanes pass */
2232 						} while (result & 0xff);
2233 					}
2234 				}
2235 			}
2236 		}
2237 	}
2238 
2239 	/* program WDQ to the middle of passing window */
2240 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
2241 		if (mrc_params->channel_enables & (1 << ch)) {
2242 			for (rk = 0; rk < NUM_RANKS; rk++) {
2243 				if (mrc_params->rank_enables & (1 << rk)) {
2244 #ifdef R2R_SHARING
2245 					/* increment "num_ranks_enabled" */
2246 					num_ranks_enabled++;
2247 #endif
2248 					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
2249 						DPF(D_INFO,
2250 						    "WDQ eye rank%d lane%d : %d-%d\n",
2251 						    rk, bl,
2252 						    delay[L][ch][rk][bl],
2253 						    delay[R][ch][rk][bl]);
2254 
2255 						temp = (delay[R][ch][rk][bl] + delay[L][ch][rk][bl]) / 2;
2256 
2257 #ifdef R2R_SHARING
2258 						final_delay[ch][bl] += temp;
2259 						set_wdq(ch, rk, bl,
2260 							final_delay[ch][bl] / num_ranks_enabled);
2261 #else
2262 						set_wdq(ch, rk, bl, temp);
2263 #endif
2264 					}
2265 				}
2266 			}
2267 		}
2268 	}
2269 #endif
2270 
2271 	LEAVEFN();
2272 }
2273 
2274 /*
2275  * This function will store relevant timing data
2276  *
2277  * This data will be used on subsequent boots to speed up boot times
2278  * and is required for Suspend To RAM capabilities.
2279  */
2280 void store_timings(struct mrc_params *mrc_params)
2281 {
2282 	uint8_t ch, rk, bl;
2283 	struct mrc_timings *mt = &mrc_params->timings;
2284 
2285 	for (ch = 0; ch < NUM_CHANNELS; ch++) {
2286 		for (rk = 0; rk < NUM_RANKS; rk++) {
2287 			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
2288 				mt->rcvn[ch][rk][bl] = get_rcvn(ch, rk, bl);
2289 				mt->rdqs[ch][rk][bl] = get_rdqs(ch, rk, bl);
2290 				mt->wdqs[ch][rk][bl] = get_wdqs(ch, rk, bl);
2291 				mt->wdq[ch][rk][bl] = get_wdq(ch, rk, bl);
2292 
2293 				if (rk == 0)
2294 					mt->vref[ch][bl] = get_vref(ch, bl);
2295 			}
2296 
2297 			mt->wctl[ch][rk] = get_wctl(ch, rk);
2298 		}
2299 
2300 		mt->wcmd[ch] = get_wcmd(ch);
2301 	}
2302 
2303 	/* needs to be saved in case the frequency changes after a warm reset */
2304 	mt->ddr_speed = mrc_params->ddr_speed;
2305 }
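
/*
 * A minimal sketch of the matching restore path on warm/S3 boots,
 * assuming the saved struct mrc_timings is handed back through
 * mrc_params->timings; restore_timings() is hypothetical here, the
 * real consumer sits elsewhere in the MRC flow:
 *
 *	struct mrc_timings *mt = &mrc_params->timings;
 *
 *	for (ch = 0; ch < NUM_CHANNELS; ch++)
 *		for (rk = 0; rk < NUM_RANKS; rk++)
 *			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
 *				set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
 *				set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
 *				set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
 *				set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
 *			}
 */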
2306 
2307 /*
2308  * Enable memory scrambling: restore or generate the 32-bit scrambler
2309  * seed and program it into the MCU scrambler registers.
2310  */
2311 void enable_scrambling(struct mrc_params *mrc_params)
2312 {
2313 	uint32_t lfsr = 0;
2314 	uint8_t i;
2315 
2316 	if (mrc_params->scrambling_enables == 0)
2317 		return;
2318 
2319 	ENTERFN();
2320 
2321 	/* 32 bit seed is always stored in BIOS NVM */
2322 	lfsr = mrc_params->timings.scrambler_seed;
2323 
2324 	if (mrc_params->boot_mode == BM_COLD) {
2325 		/*
2326 		 * the factory value is 0; on first boot,
2327 		 * a clock-based seed is loaded instead.
2328 		 */
2329 		if (lfsr == 0) {
2330 			/*
2331 			 * get seed from system clock
2332 			 * and make sure it is not all 1's
2333 			 */
2334 			lfsr = rdtsc() & 0x0fffffff;
2335 		} else {
2336 			/*
2337 			 * Need to replace the scrambler seed:
2338 			 *
2339 			 * advance the 32-bit LFSR 16 times, which yields the
2340 			 * last part of the previous scrambler vector
2341 			 */
2342 			for (i = 0; i < 16; i++)
2343 				lfsr32(&lfsr);
2344 		}
2345 
2346 		/* save new seed */
2347 		mrc_params->timings.scrambler_seed = lfsr;
2348 	}
2349 
2350 	/*
2351 	 * In warm boot or S3 exit, we have the previous seed.
2352 	 * In cold boot, we have the last 32bit LFSR which is the new seed.
2353 	 */
2354 	lfsr32(&lfsr);	/* shift to next value */
2355 	msg_port_write(MEM_CTLR, SCRMSEED, (lfsr & 0x0003ffff));
2356 
2357 	for (i = 0; i < 2; i++)
2358 		msg_port_write(MEM_CTLR, SCRMLO + i, (lfsr & 0xaaaaaaaa));
2359 
2360 	LEAVEFN();
2361 }
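
/*
 * Note: SCRMSEED takes only the low 18 bits of the LFSR value (mask
 * 0x0003ffff above). lfsr32() (mrc_util.c) advances a 32-bit linear
 * feedback shift register; a generic Galois-style step looks like the
 * sketch below (the tap constant is illustrative, not necessarily the
 * polynomial lfsr32() implements):
 *
 *	static inline u32 lfsr_step(u32 v)
 *	{
 *		return (v >> 1) ^ (-(v & 1) & 0xa3000000);
 *	}
 */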
2362 
2363 /*
2364  * Configure MCU Power Management Control Register
2365  * and Scheduler Control Register
2366  */
2367 void prog_ddr_control(struct mrc_params *mrc_params)
2368 {
2369 	u32 dsch;
2370 	u32 dpmc0;
2371 
2372 	ENTERFN();
2373 
2374 	dsch = msg_port_read(MEM_CTLR, DSCH);
2375 	dsch &= ~(DSCH_OOODIS | DSCH_OOOST3DIS | DSCH_NEWBYPDIS);
2376 	msg_port_write(MEM_CTLR, DSCH, dsch);
2377 
2378 	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
2379 	dpmc0 &= ~DPMC0_DISPWRDN;
2380 	dpmc0 |= (mrc_params->power_down_disable << 25);
2381 	dpmc0 &= ~DPMC0_CLKGTDIS;
2382 	dpmc0 &= ~DPMC0_PCLSTO_MASK;
2383 	dpmc0 |= (4 << 16);	/* PCLSTO (page close timeout) field = 4 */
2384 	dpmc0 |= DPMC0_PREAPWDEN;
2385 	msg_port_write(MEM_CTLR, DPMC0, dpmc0);
2386 
2387 	/* CMDTRIST = 2h - CMD/ADDR are tristated when no valid command */
2388 	mrc_write_mask(MEM_CTLR, DPMC1, 0x20, 0x30);
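	/* i.e. value 0x20 under mask 0x30: the 2-bit field at bits [5:4] = 2h */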
2389 
2390 	LEAVEFN();
2391 }
2392 
2393 /*
2394  * After training complete configure MCU Rank Population Register
2395  * specifying: ranks enabled, device width, density, address mode
2396  */
2397 void prog_dra_drb(struct mrc_params *mrc_params)
2398 {
2399 	u32 drp;
2400 	u32 dco;
2401 	u8 density = mrc_params->params.density;
2402 
2403 	ENTERFN();
2404 
2405 	dco = msg_port_read(MEM_CTLR, DCO);
2406 	dco &= ~DCO_IC;
2407 	msg_port_write(MEM_CTLR, DCO, dco);
2408 
2409 	drp = 0;
2410 	if (mrc_params->rank_enables & 1)
2411 		drp |= DRP_RKEN0;
2412 	if (mrc_params->rank_enables & 2)
2413 		drp |= DRP_RKEN1;
2414 	if (mrc_params->dram_width == X16) {
2415 		drp |= (1 << 4);
2416 		drp |= (1 << 9);
2417 	}
2418 
2419 	/*
2420 	 * Density encoding in struct dram_params (0=512Mb, 1=1Gb, 2=2Gb,
2421 	 * 3=4Gb) has to be mapped to the RANKDENSx encoding (0=1Gb)
2422 	 */
2423 	if (density == 0)
2424 		density = 4;
2425 
2426 	drp |= ((density - 1) << 6);
2427 	drp |= ((density - 1) << 11);
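
	/*
	 * Resulting RANKDENSx field values, derived from the code above:
	 * 1 (1Gb) -> 0, 2 (2Gb) -> 1, 3 (4Gb) -> 2, and the remapped
	 * 512Mb case (0 -> 4) -> 3
	 */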
2428 
2429 	/* Address mode can be overwritten if ECC enabled */
2430 	drp |= (mrc_params->address_mode << 14);
2431 
2432 	msg_port_write(MEM_CTLR, DRP, drp);
2433 
2434 	dco &= ~DCO_PMICTL;
2435 	dco |= DCO_IC;
2436 	msg_port_write(MEM_CTLR, DCO, dco);
2437 
2438 	LEAVEFN();
2439 }
2440 
2441 /* Send DRAM wake command */
2442 void perform_wake(struct mrc_params *mrc_params)
2443 {
2444 	ENTERFN();
2445 
2446 	dram_wake_command();
2447 
2448 	LEAVEFN();
2449 }
2450 
2451 /*
2452  * Configure refresh rate and short ZQ calibration interval
2453  * Activate dynamic self refresh
2454  */
2455 void change_refresh_period(struct mrc_params *mrc_params)
2456 {
2457 	u32 drfc;
2458 	u32 dcal;
2459 	u32 dpmc0;
2460 
2461 	ENTERFN();
2462 
2463 	drfc = msg_port_read(MEM_CTLR, DRFC);
2464 	drfc &= ~DRFC_TREFI_MASK;
2465 	drfc |= (mrc_params->refresh_rate << 12);
2466 	drfc |= DRFC_REFDBTCLR;
2467 	msg_port_write(MEM_CTLR, DRFC, drfc);
2468 
2469 	dcal = msg_port_read(MEM_CTLR, DCAL);
2470 	dcal &= ~DCAL_ZQCINT_MASK;
2471 	dcal |= (3 << 8);	/* 63ms */
2472 	msg_port_write(MEM_CTLR, DCAL, dcal);
2473 
2474 	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
2475 	dpmc0 |= (DPMC0_DYNSREN | DPMC0_ENPHYCLKGATE);
2476 	msg_port_write(MEM_CTLR, DPMC0, dpmc0);
2477 
2478 	LEAVEFN();
2479 }
2480 
2481 /*
2482  * Configure DDRPHY for Auto-Refresh, Periodic Compensations,
2483  * Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
2484  */
2485 void set_auto_refresh(struct mrc_params *mrc_params)
2486 {
2487 	uint32_t channel;
2488 	uint32_t rank;
2489 	uint32_t bl;
2490 	uint32_t bl_divisor = 1;
2491 	uint32_t temp;
2492 
2493 	ENTERFN();
2494 
2495 	/*
2496 	 * Enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp,
2497 	 * ZQSPERIOD, Auto-Precharge, CKE Power-Down
2498 	 */
2499 	for (channel = 0; channel < NUM_CHANNELS; channel++) {
2500 		if (mrc_params->channel_enables & (1 << channel)) {
2501 			/* Enable Periodic RCOMPS */
2502 			mrc_alt_write_mask(DDRPHY, CMPCTRL, 2, 2);
2503 
2504 			/* Enable Dynamic DiffAmp & Set Read ODT Value */
2505 			switch (mrc_params->rd_odt_value) {
2506 			case 0:
2507 				temp = 0x3f;	/* OFF */
2508 				break;
2509 			default:
2510 				temp = 0x00;	/* Auto */
2511 				break;
2512 			}
2513 
2514 			for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
2515 				/* Override: DIFFAMP, ODT */
2516 				mrc_alt_write_mask(DDRPHY,
2517 					B0OVRCTL + bl * DDRIODQ_BL_OFFSET +
2518 					channel * DDRIODQ_CH_OFFSET,
2519 					temp << 10,
2520 					0x003ffc00);
2521 
2522 				/* Override: DIFFAMP, ODT */
2523 				mrc_alt_write_mask(DDRPHY,
2524 					B1OVRCTL + bl * DDRIODQ_BL_OFFSET +
2525 					channel * DDRIODQ_CH_OFFSET,
2526 					temp << 10,
2527 					0x003ffc00);
2528 			}
2529 
2530 			/* Issue ZQCS command */
2531 			for (rank = 0; rank < NUM_RANKS; rank++) {
2532 				if (mrc_params->rank_enables & (1 << rank))
2533 					dram_init_command(DCMD_ZQCS(rank));
2534 			}
2535 		}
2536 	}
2537 
2538 	clear_pointers();
2539 
2540 	LEAVEFN();
2541 }
2542 
2543 /*
2544  * Enable ECC support, depending on the configuration
2545  *
2546  * The available memory size is decreased, and memory is initialized
2547  * with 0s in order to clear the error status. Address mode 2 is forced.
2548  */
2549 void ecc_enable(struct mrc_params *mrc_params)
2550 {
2551 	u32 drp;
2552 	u32 dsch;
2553 	u32 ecc_ctrl;
2554 
2555 	if (mrc_params->ecc_enables == 0)
2556 		return;
2557 
2558 	ENTERFN();
2559 
2560 	/* Configuration required in ECC mode */
2561 	drp = msg_port_read(MEM_CTLR, DRP);
2562 	drp &= ~DRP_ADDRMAP_MASK;
2563 	drp |= DRP_ADDRMAP_MAP1;
2564 	drp |= DRP_PRI64BSPLITEN;
2565 	msg_port_write(MEM_CTLR, DRP, drp);
2566 
2567 	/* Disable new request bypass */
2568 	dsch = msg_port_read(MEM_CTLR, DSCH);
2569 	dsch |= DSCH_NEWBYPDIS;
2570 	msg_port_write(MEM_CTLR, DSCH, dsch);
2571 
2572 	/* Enable ECC */
2573 	ecc_ctrl = (DECCCTRL_SBEEN | DECCCTRL_DBEEN | DECCCTRL_ENCBGEN);
2574 	msg_port_write(MEM_CTLR, DECCCTRL, ecc_ctrl);
2575 
2576 	/* Assume 8 bank memory, one bank is gone for ECC */
2577 	mrc_params->mem_size -= mrc_params->mem_size / 8;
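	/* e.g. a 256 MiB array leaves 256 - 256 / 8 = 224 MiB usable */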
2578 
2579 	/* For S3 resume memory content has to be preserved */
2580 	if (mrc_params->boot_mode != BM_S3) {
2581 		select_hte();
2582 		hte_mem_init(mrc_params, MRC_MEM_INIT);
2583 		select_mem_mgr();
2584 	}
2585 
2586 	LEAVEFN();
2587 }
2588 
2589 /*
2590  * Execute the memory test; if an error is detected,
2591  * it is indicated in mrc_params->status
2592  */
2593 void memory_test(struct mrc_params *mrc_params)
2594 {
2595 	uint32_t result = 0;
2596 
2597 	ENTERFN();
2598 
2599 	select_hte();
2600 	result = hte_mem_init(mrc_params, MRC_MEM_TEST);
2601 	select_mem_mgr();
2602 
2603 	DPF(D_INFO, "Memory test result %x\n", result);
2604 	mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);
2605 	LEAVEFN();
2606 }
2607 
2608 /* Lock MCU registers at the end of the initialization sequence */
2609 void lock_registers(struct mrc_params *mrc_params)
2610 {
2611 	u32 dco;
2612 
2613 	ENTERFN();
2614 
2615 	dco = msg_port_read(MEM_CTLR, DCO);
2616 	dco &= ~(DCO_PMICTL | DCO_PMIDIS);
2617 	dco |= (DCO_DRPLOCK | DCO_CPGCLOCK);
2618 	msg_port_write(MEM_CTLR, DCO, dco);
2619 
2620 	LEAVEFN();
2621 }
2622