xref: /openbmc/u-boot/drivers/ddr/altera/sequencer.c (revision 23e8ea901a87e0a6296ecf135b3b71672d832676)
/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier:    BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic.  In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
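
/*
 * Illustration: with skip_delay_mask == 0xffff (i.e. not skipping),
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x20) == 0x20; with skip_delay_mask == 0
 * (skipping), the same expression evaluates to 0.
 */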

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER: memory clock is not stable; we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER: calibration status all set to zero. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
	ratio = RW_MGR_MEM_DATA_WIDTH /
		RW_MGR_MEM_DATA_MASK_WIDTH;
	param->dm_correct_mask = (1 << ratio) - 1;
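	/*
	 * Worked example (all RW_MGR_* constants come from the generated
	 * sequencer_defines.h and vary per configuration): with
	 * RW_MGR_MEM_DQ_PER_READ_DQS == 8 and one virtual group per read
	 * DQS, ratio == 8, so read_correct_mask_vg == read_correct_mask
	 * == 0xff; likewise, RW_MGR_MEM_DATA_WIDTH == 32 with
	 * RW_MGR_MEM_DATA_MASK_WIDTH == 4 gives dm_correct_mask == 0xff.
	 */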
}

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank ID
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/* Read:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

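	/*
	 * The write below packs the chip-select pattern into bits [7:0]
	 * (the bit for the addressed rank is cleared), odt_mask_0 (read
	 * ODT) into bits [15:8], and odt_mask_1 (write ODT) into bits
	 * [23:16]. Worked example for rank == 2 in the 4-rank case above:
	 * cs_and_odt_mask == 0xFB | (0x1 << 8) | (0x5 << 16) == 0x0501FB.
	 */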
	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
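
/*
 * For instance, scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, 3, phase) writes
 * "phase" to SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_PHASE_OFFSET | 0xc:
 * the per-group registers are 4 bytes apart, hence the "grp << 2".
 */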

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *                             MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * USER: although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * USER: although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
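
/*
 * E.g. if each write group spans two read groups (ratio == 2), a call with
 * write_group == 1 programs the delay into read-group registers 2 and 3.
 */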

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER: Zero all DQS config settings, across all groups and all
	 * shadow registers.
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in the ddr3 inst_rom; it could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
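
	/*
	 * Worked example: afi_clocks == 0x12345 takes the last branch above,
	 * so inner == 0xff, outer == 0xff and
	 * c_loop == (0x12345 - 1) >> 16 == 1; the nested-loop sequence below
	 * is then kicked off c_loop + 1 == 2 times.
	 */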

	/*
	 * The rom instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * If we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * set TARGET_B to IDLE_LOOP1, and skip IDLE_LOOP2 entirely.
	 *
	 * A little confusing, but it helps save precious space in the
	 * inst_rom and sequencer rom, keeps the delays more accurate,
	 * and reduces overhead.
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
			&sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
			&sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
			&sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
					SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * USER: Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 0, a = 256, b = 106 => a = FF,
	 * b = 6A.
	 */
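	/*
	 * Sanity check of the suggested solution: with n == 0, a == 256 and
	 * b == 106, number_of_cycles == ((2 + 0) * 256 + 2) * 106 == 54484
	 * cycles, i.e. ~204 us at 3.75 ns/cycle, which covers tINIT = 200 us.
	 */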
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * Transition the RESET to high.
	 * Wait for 500us.
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 2, a = 131, b = 256 => a = 83,
	 * b = FF.
	 */
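	/*
	 * Sanity check: with n == 2, a == 131 and b == 256,
	 * number_of_cycles == ((2 + 2) * 131 + 2) * 256 == 134656 cycles,
	 * i.e. ~505 us at 3.75 ns/cycle, which covers the 500 us wait.
	 */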
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}

/*
 * At the end of calibration we have to program the user settings in and
 * hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * USER: We need to wait tMOD (12CK or 15ns) time before issuing
	 * other commands, but we will have plenty of NIOS cycles before
	 * actual handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @all_ranks:	Test all ranks
 *
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	const u32 addr_offset =
			 (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	const u32 correct_mask_vg = param->read_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
	int vg, r;
	int ret = 0;

	bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
			writel(RW_MGR_GUARANTEED_READ,
			       addr + addr_offset + (vg << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
		}

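		/*
		 * Illustration: with shift_ratio == 4 and correct_mask_vg ==
		 * 0xf, two virtual groups that read back clean (no error bits
		 * set in base_rw_mgr) accumulate tmp_bit_chk == 0xff.
		 */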
		bit_chk &= tmp_bit_chk;
	}

	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (bit_chk != param->read_correct_mask)
		ret = -EIO;

	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
		   __func__, __LINE__, group, bit_chk,
		   param->read_correct_mask, ret);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
			RW_MGR_MEM_NUMBER_OF_RANKS :
			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
			&sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;
	uint32_t quick_read_mode;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			/* need at least two (1+1) reads to capture failures */
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
			&sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
			       vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
					       param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}

/**
 * rw_mgr_incr_vfifo() - Increase VFIFO value
 * @grp:	Read/Write group
 *
 * Increase VFIFO value.
 */
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

/**
 * rw_mgr_decr_vfifo() - Decrease VFIFO value
 * @grp:	Read/Write group
 *
 * Decrease VFIFO value.
 */
static void rw_mgr_decr_vfifo(const u32 grp)
{
	u32 i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp);
}
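
/*
 * Note on the implementation above: only an increment command is exposed by
 * the PHY manager, so one step back is realized as VFIFO_SIZE - 1 steps
 * forward, presumably because the VFIFO pointer wraps modulo VFIFO_SIZE.
 */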

/**
 * find_vfifo_failing_read() - Push VFIFO to get a failing read
 * @grp:	Read/Write group
 *
 * Push VFIFO until a failing read happens.
 */
static int find_vfifo_failing_read(const u32 grp)
{
	u32 v, ret, bit_chk, fail_cnt = 0;

	for (v = 0; v < VFIFO_SIZE; v++) {
		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
						PASS_ONE_BIT, &bit_chk, 0);
		if (!ret) {
			fail_cnt++;

			if (fail_cnt == 2)
				return v;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	/* No failing read found! Something must have gone wrong. */
	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}

/**
 * sdr_find_phase() - Find DQS enable phase
 * @working:	If 1, look for working phase, if 0, look for non-working phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @i:		Iterator
 * @p:		DQS Phase Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	u32 ret, bit_chk;
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++) {
			scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

			ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
						PASS_ONE_BIT, &bit_chk, 0);
			if (!working)
				ret = !ret;

			if (ret)
				return 0;

			*work += IO_DELAY_PER_OPA_TAP;
		}

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}

/**
 * sdr_working_phase() - Find working DQS enable phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @d:		dtaps output value
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find working DQS enable phase setting.
 */
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
		ret = sdr_find_phase(1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);
	return -EINVAL;
}

/**
 * sdr_backup_phase() - Find DQS enable backup phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @p:		DQS Phase Iterator
 *
 * Find DQS enable backup phase setting.
 */
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, bit_chk, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
					PASS_ONE_BIT, &bit_chk, 0);
		if (ret) {
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}

/**
 * sdr_nonworking_phase() - Find non-working DQS enable phase
 * @grp:	Read/Write group
 * @work_end:	Working window end position
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find non-working DQS enable phase setting.
 */
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}

/**
 * sdr_find_window_center() - Find center of the working DQS window.
 * @grp:	Read/Write group
 * @work_bgn:	First working settings
 * @work_end:	Last working settings
 *
 * Find center of the working DQS enable window.
 */
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 bit_chk, work_mid;
	int tmp_delay = 0;
	int i, p, d;

	work_mid = (work_bgn + work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	work_mid %= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);

	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
	p = tmp_delay / IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);

	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
	if (d > IO_DQS_EN_DELAY_MAX)
		d = IO_DQS_EN_DELAY_MAX;
	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
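
	/*
	 * Purely illustrative numbers (the real values are the timing
	 * constants used above): with IO_DELAY_PER_OPA_TAP == 400,
	 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP == 50 and work_mid == 1250, this
	 * picks p == 3 (covering 1200 of the 1250) and
	 * d == DIV_ROUND_UP(50, 50) == 1 for the remainder.
	 */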
1538 
1539 	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
1540 
1541 	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1542 	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1543 
1544 	/*
1545 	 * push vfifo until we can successfully calibrate. We can do this
1546 	 * because the largest possible margin in 1 VFIFO cycle.
1547 	 */
1548 	for (i = 0; i < VFIFO_SIZE; i++) {
1549 		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
1550 		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1551 							     PASS_ONE_BIT,
1552 							     &bit_chk, 0)) {
1553 			debug_cond(DLEVEL == 2,
1554 				   "%s:%d center: found: ptap=%u dtap=%u\n",
1555 				   __func__, __LINE__, p, d);
1556 			return 0;
1557 		}
1558 
1559 		/* Fiddle with FIFO. */
1560 		rw_mgr_incr_vfifo(grp);
1561 	}
1562 
1563 	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
1564 		   __func__, __LINE__);
1565 	return -EINVAL;
1566 }
1567 
1568 /* find a good dqs enable to use */
1569 static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(u32 grp)
1570 {
1571 	uint32_t d, p, i;
1572 	uint32_t bit_chk;
1573 	uint32_t dtaps_per_ptap;
1574 	uint32_t work_bgn, work_end;
1575 	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
1576 
1577 	debug("%s:%d %u\n", __func__, __LINE__, grp);
1578 
1579 	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1580 
1581 	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1582 	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1583 
1584 	/* Step 0: Determine number of delay taps for each phase tap. */
1585 	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1586 
1587 	/* Step 1: First push vfifo until we get a failing read. */
1588 	find_vfifo_failing_read(grp);
1589 
1590 	/* Step 2: Find first working phase, increment in ptaps. */
1591 	work_bgn = 0;
1592 	if (sdr_working_phase(grp, &work_bgn, &d, &p, &i))
1593 		return 0;
1594 
1595 	work_end = work_bgn;
1596 
1597 	/*
1598 	 * If d is 0 then the working window covers a phase tap and we can
1599 	 * follow the old procedure. Otherwise, we've found the beginning
1600 	 * and we need to increment the dtaps until we find the end.
1601 	 */
1602 	if (d == 0) {
1603 		/*
1604 		 * Step 3a: If we have room, back off by one and
1605 		 *          increment in dtaps.
1606 		 */
1607 		sdr_backup_phase(grp, &work_bgn, &p);
1608 
1609 		/*
1610 		 * Step 4a: go forward from working phase to non working
1611 		 * phase, increment in ptaps.
1612 		 */
1613 		if (sdr_nonworking_phase(grp, &work_end, &p, &i))
1614 			return 0;
1615 
1616 		/* Step 5a: Back off one from last, increment in dtaps. */
1617 
1618 		/* Special case code for backing up a phase */
1619 		if (p == 0) {
1620 			p = IO_DQS_EN_PHASE_MAX;
1621 			rw_mgr_decr_vfifo(grp);
1622 		} else {
1623 			p = p - 1;
1624 		}
1625 
1626 		work_end -= IO_DELAY_PER_OPA_TAP;
1627 		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1628 
1629 		d = 0;
1630 
1631 		debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
1632 			   __func__, __LINE__, p);
1633 	}
1634 
1635 	/* The dtap increment to find the failing edge is done here. */
1636 	for (; d <= IO_DQS_EN_DELAY_MAX;
1637 	     d++, work_end += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1638 		debug_cond(DLEVEL == 2, "%s:%d end-2: dtap=%u\n",
1639 			   __func__, __LINE__, d);
1640 
1641 		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1642 
1643 		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1644 							      PASS_ONE_BIT,
1645 							      &bit_chk, 0)) {
1646 			break;
1647 		}
1648 	}
1649 
1650 	/* Go back to working dtap */
1651 	if (d != 0)
1652 		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1653 
1654 	debug_cond(DLEVEL == 2,
1655 		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
1656 		   __func__, __LINE__, p, d - 1, work_end);
1657 
1658 	if (work_end < work_bgn) {
1659 		/* nil range */
1660 		debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
1661 			   __func__, __LINE__);
1662 		return 0;
1663 	}
1664 
1665 	debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
1666 		   __func__, __LINE__, work_bgn, work_end);
1667 
1668 	/*
1669 	 * We need to calculate the number of dtaps that equal a ptap.
1670 	 * To do that we'll back up a ptap and re-find the edge of the
1671 	 * window using dtaps
1672 	 */
1673 	debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
1674 		   __func__, __LINE__);
1675 
1676 	/* Special case code for backing up a phase */
1677 	if (p == 0) {
1678 		p = IO_DQS_EN_PHASE_MAX;
1679 		rw_mgr_decr_vfifo(grp);
1680 		debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
1681 			   __func__, __LINE__, p);
1682 	} else {
1683 		p = p - 1;
1684 		debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u",
1685 			   __func__, __LINE__, p);
1686 	}
1687 
1688 	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1689 
1690 	/*
1691 	 * Increase dtap until we first see a passing read (in case the
1692 	 * window is smaller than a ptap), and then a failing read to
1693 	 * mark the edge of the window again.
1694 	 */
1695 
1696 	/* Find a passing read. */
1697 	debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
1698 		   __func__, __LINE__);
1699 	found_passing_read = 0;
1700 	found_failing_read = 0;
1701 	initial_failing_dtap = d;
1702 	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
1703 		debug_cond(DLEVEL == 2, "%s:%d testing read d=%u\n",
1704 			   __func__, __LINE__, d);
1705 		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1706 
1707 		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1708 							     PASS_ONE_BIT,
1709 							     &bit_chk, 0)) {
1710 			found_passing_read = 1;
1711 			break;
1712 		}
1713 	}
1714 
1715 	if (found_passing_read) {
1716 		/* Find a failing read. */
1717 		debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
1718 			   __func__, __LINE__);
1719 		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
1720 			debug_cond(DLEVEL == 2, "%s:%d testing read d=%u\n",
1721 				   __func__, __LINE__, d);
1722 			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1723 
1724 			if (!rw_mgr_mem_calibrate_read_test_all_ranks
1725 				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
1726 				found_failing_read = 1;
1727 				break;
1728 			}
1729 		}
1730 	} else {
1731 		debug_cond(DLEVEL == 1,
1732 			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
1733 			   __func__, __LINE__);
1734 	}
1735 
1736 	/*
1737 	 * The dynamically calculated dtaps_per_ptap is only valid if we
1738 	 * found a passing/failing read. If we didn't, it means d hit the max
1739 	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
1740 	 * statically calculated value.
1741 	 */
1742 	if (found_passing_read && found_failing_read)
1743 		dtaps_per_ptap = d - initial_failing_dtap;
1744 
1745 	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
1746 	debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
1747 		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
1748 
1749 	/* Step 6: Find the centre of the window. */
1750 	if (sdr_find_window_centre(grp, work_bgn, work_end))
1751 		return 0;
1752 
1753 	return 1;
1754 }
1755 
1756 /* per-bit deskew DQ and center */
1757 static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
1758 	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
1759 	uint32_t use_read_test, uint32_t update_fom)
1760 {
1761 	uint32_t i, p, d, min_index;
1762 	/*
1763 	 * Store these as signed since there are comparisons with
1764 	 * signed numbers.
1765 	 */
1766 	uint32_t bit_chk;
1767 	uint32_t sticky_bit_chk;
1768 	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1769 	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1770 	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
1771 	int32_t mid;
1772 	int32_t orig_mid_min, mid_min;
1773 	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
1774 		final_dqs_en;
1775 	int32_t dq_margin, dqs_margin;
1776 	uint32_t stop;
	uint32_t temp_dq_in_delay1;
1778 	uint32_t addr;
1779 
1780 	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
1781 
1782 	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
1783 	start_dqs = readl(addr + (read_group << 2));
1784 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
1785 		start_dqs_en = readl(addr + ((read_group << 2)
1786 				     - IO_DQS_EN_DELAY_OFFSET));
1787 
	/*
	 * Set the left and right edge of each bit to an illegal value,
	 * use (IO_IO_IN_DELAY_MAX + 1) as an illegal value.
	 */
1790 	sticky_bit_chk = 0;
1791 	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1792 		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
1793 		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1794 	}
1795 
1796 	/* Search for the left edge of the window for each bit */
1797 	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, d);
1799 
1800 		writel(0, &sdr_scc_mgr->update);
1801 
1802 		/*
1803 		 * Stop searching when the read test doesn't pass AND when
1804 		 * we've seen a passing read on every bit.
1805 		 */
1806 		if (use_read_test) {
1807 			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1808 				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1809 				&bit_chk, 0, 0);
1810 		} else {
1811 			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1812 							0, PASS_ONE_BIT,
1813 							&bit_chk, 0);
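			/*
			 * The write test returns one pass/fail bit per DQ
			 * pin of the whole write group; shift the result
			 * right so that the bits belonging to this read
			 * group land in the low
			 * RW_MGR_MEM_DQ_PER_READ_DQS bits.
			 */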
1814 			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1815 				(read_group - (write_group *
1816 					RW_MGR_MEM_IF_READ_DQS_WIDTH /
1817 					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1818 			stop = (bit_chk == 0);
1819 		}
1820 		sticky_bit_chk = sticky_bit_chk | bit_chk;
1821 		stop = stop && (sticky_bit_chk == param->read_correct_mask);
1822 		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \
1823 			   && %u", __func__, __LINE__, d,
1824 			   sticky_bit_chk,
1825 			param->read_correct_mask, stop);
1826 
1827 		if (stop == 1) {
1828 			break;
1829 		} else {
1830 			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1831 				if (bit_chk & 1) {
1832 					/* Remember a passing test as the
1833 					left_edge */
1834 					left_edge[i] = d;
1835 				} else {
1836 					/* If a left edge has not been seen yet,
1837 					then a future passing test will mark
1838 					this edge as the right edge */
1839 					if (left_edge[i] ==
1840 						IO_IO_IN_DELAY_MAX + 1) {
1841 						right_edge[i] = -(d + 1);
1842 					}
1843 				}
1844 				bit_chk = bit_chk >> 1;
1845 			}
1846 		}
1847 	}
1848 
1849 	/* Reset DQ delay chains to 0 */
1850 	scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
1851 	sticky_bit_chk = 0;
1852 	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
1853 		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
1854 			   %d right_edge[%u]: %d\n", __func__, __LINE__,
1855 			   i, left_edge[i], i, right_edge[i]);
1856 
1857 		/*
1858 		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
1860 		 * Reset it to the illegal value.
1861 		 */
1862 		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
1863 			right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1864 			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1865 			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
1866 				   right_edge[%u]: %d\n", __func__, __LINE__,
1867 				   i, right_edge[i]);
1868 		}
1869 
1870 		/*
1871 		 * Reset sticky bit (except for bits where we have seen
1872 		 * both the left and right edge).
1873 		 */
1874 		sticky_bit_chk = sticky_bit_chk << 1;
1875 		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
1876 		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1877 			sticky_bit_chk = sticky_bit_chk | 1;
1878 		}
1879 
1880 		if (i == 0)
1881 			break;
1882 	}
1883 
1884 	/* Search for the right edge of the window for each bit */
1885 	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
1886 		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
1887 		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
1888 			uint32_t delay = d + start_dqs_en;
1889 			if (delay > IO_DQS_EN_DELAY_MAX)
1890 				delay = IO_DQS_EN_DELAY_MAX;
1891 			scc_mgr_set_dqs_en_delay(read_group, delay);
1892 		}
1893 		scc_mgr_load_dqs(read_group);
1894 
1895 		writel(0, &sdr_scc_mgr->update);
1896 
1897 		/*
1898 		 * Stop searching when the read test doesn't pass AND when
1899 		 * we've seen a passing read on every bit.
1900 		 */
1901 		if (use_read_test) {
1902 			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1903 				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1904 				&bit_chk, 0, 0);
1905 		} else {
1906 			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1907 							0, PASS_ONE_BIT,
1908 							&bit_chk, 0);
1909 			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1910 				(read_group - (write_group *
1911 					RW_MGR_MEM_IF_READ_DQS_WIDTH /
1912 					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1913 			stop = (bit_chk == 0);
1914 		}
1915 		sticky_bit_chk = sticky_bit_chk | bit_chk;
1916 		stop = stop && (sticky_bit_chk == param->read_correct_mask);
1917 
1918 		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \
1919 			   %u && %u", __func__, __LINE__, d,
1920 			   sticky_bit_chk, param->read_correct_mask, stop);
1921 
1922 		if (stop == 1) {
1923 			break;
1924 		} else {
1925 			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1926 				if (bit_chk & 1) {
1927 					/* Remember a passing test as
1928 					the right_edge */
1929 					right_edge[i] = d;
1930 				} else {
1931 					if (d != 0) {
1932 						/* If a right edge has not been
1933 						seen yet, then a future passing
1934 						test will mark this edge as the
1935 						left edge */
1936 						if (right_edge[i] ==
1937 						IO_IO_IN_DELAY_MAX + 1) {
1938 							left_edge[i] = -(d + 1);
1939 						}
1940 					} else {
1941 						/* d = 0 failed, but it passed
1942 						when testing the left edge,
1943 						so it must be marginal,
1944 						set it to -1 */
1945 						if (right_edge[i] ==
1946 							IO_IO_IN_DELAY_MAX + 1 &&
1947 							left_edge[i] !=
1948 							IO_IO_IN_DELAY_MAX
1949 							+ 1) {
1950 							right_edge[i] = -1;
1951 						}
1952 						/* If a right edge has not been
1953 						seen yet, then a future passing
1954 						test will mark this edge as the
1955 						left edge */
1956 						else if (right_edge[i] ==
1957 							IO_IO_IN_DELAY_MAX +
1958 							1) {
1959 							left_edge[i] = -(d + 1);
1960 						}
1961 					}
1962 				}
1963 
1964 				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
1965 					   d=%u]: ", __func__, __LINE__, d);
1966 				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
1967 					   (int)(bit_chk & 1), i, left_edge[i]);
1968 				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
1969 					   right_edge[i]);
1970 				bit_chk = bit_chk >> 1;
1971 			}
1972 		}
1973 	}
1974 
1975 	/* Check that all bits have a window */
1976 	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1977 		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
1978 			   %d right_edge[%u]: %d", __func__, __LINE__,
1979 			   i, left_edge[i], i, right_edge[i]);
1980 		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
1981 			== IO_IO_IN_DELAY_MAX + 1)) {
1982 			/*
1983 			 * Restore delay chain settings before letting the loop
1984 			 * in rw_mgr_mem_calibrate_vfifo to retry different
1985 			 * dqs/ck relationships.
1986 			 */
1987 			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
1988 			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
1989 				scc_mgr_set_dqs_en_delay(read_group,
1990 							 start_dqs_en);
1991 			}
1992 			scc_mgr_load_dqs(read_group);
1993 			writel(0, &sdr_scc_mgr->update);
1994 
1995 			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
1996 				   find edge [%u]: %d %d", __func__, __LINE__,
1997 				   i, left_edge[i], right_edge[i]);
1998 			if (use_read_test) {
1999 				set_failing_group_stage(read_group *
2000 					RW_MGR_MEM_DQ_PER_READ_DQS + i,
2001 					CAL_STAGE_VFIFO,
2002 					CAL_SUBSTAGE_VFIFO_CENTER);
2003 			} else {
2004 				set_failing_group_stage(read_group *
2005 					RW_MGR_MEM_DQ_PER_READ_DQS + i,
2006 					CAL_STAGE_VFIFO_AFTER_WRITES,
2007 					CAL_SUBSTAGE_VFIFO_CENTER);
2008 			}
2009 			return 0;
2010 		}
2011 	}
2012 
2013 	/* Find middle of window for each DQ bit */
2014 	mid_min = left_edge[0] - right_edge[0];
2015 	min_index = 0;
2016 	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2017 		mid = left_edge[i] - right_edge[i];
2018 		if (mid < mid_min) {
2019 			mid_min = mid;
2020 			min_index = i;
2021 		}
2022 	}
2023 
2024 	/*
2025 	 * -mid_min/2 represents the amount that we need to move DQS.
2026 	 * If mid_min is odd and positive we'll need to add one to
2027 	 * make sure the rounding in further calculations is correct
2028 	 * (always bias to the right), so just add 1 for all positive values.
2029 	 */
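	/*
	 * For example, mid_min = 3 becomes (3 + 1) / 2 = 2 rather than
	 * 3 / 2 = 1, biasing the DQS shift of -mid_min to the right.
	 */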
2030 	if (mid_min > 0)
2031 		mid_min++;
2032 
2033 	mid_min = mid_min / 2;
2034 
2035 	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2036 		   __func__, __LINE__, mid_min, min_index);
2037 
2038 	/* Determine the amount we can change DQS (which is -mid_min) */
2039 	orig_mid_min = mid_min;
2040 	new_dqs = start_dqs - mid_min;
2041 	if (new_dqs > IO_DQS_IN_DELAY_MAX)
2042 		new_dqs = IO_DQS_IN_DELAY_MAX;
2043 	else if (new_dqs < 0)
2044 		new_dqs = 0;
2045 
2046 	mid_min = start_dqs - new_dqs;
2047 	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2048 		   mid_min, new_dqs);
2049 
2050 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2051 		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2052 			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2053 		else if (start_dqs_en - mid_min < 0)
2054 			mid_min += start_dqs_en - mid_min;
2055 	}
2056 	new_dqs = start_dqs - mid_min;
2057 
2058 	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
2059 		   new_dqs=%d mid_min=%d\n", start_dqs,
2060 		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2061 		   new_dqs, mid_min);
2062 
2063 	/* Initialize data for export structures */
2064 	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
2065 	dq_margin  = IO_IO_IN_DELAY_MAX + 1;
2066 
2067 	/* add delay to bring centre of all DQ windows to the same "level" */
2068 	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
2069 		/* Use values before divide by 2 to reduce round off error */
2070 		shift_dq = (left_edge[i] - right_edge[i] -
2071 			(left_edge[min_index] - right_edge[min_index]))/2  +
2072 			(orig_mid_min - mid_min);
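		/*
		 * Worked example with illustrative numbers: if
		 * left_edge[i] = 10, right_edge[i] = 2 and the narrowest
		 * window has left_edge[min_index] = 6,
		 * right_edge[min_index] = 2 (orig_mid_min == mid_min),
		 * then shift_dq = ((10 - 2) - (6 - 2)) / 2 = 2, nudging
		 * this bit's centre by two taps to level it with the
		 * others.
		 */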
2073 
2074 		debug_cond(DLEVEL == 2, "vfifo_center: before: \
2075 			   shift_dq[%u]=%d\n", i, shift_dq);
2076 
2077 		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
		temp_dq_in_delay1 = readl(addr + (p << 2));
2080 
2081 		if (shift_dq + (int32_t)temp_dq_in_delay1 >
2082 			(int32_t)IO_IO_IN_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay1;
2084 		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
2085 			shift_dq = -(int32_t)temp_dq_in_delay1;
2086 		}
2087 		debug_cond(DLEVEL == 2, "vfifo_center: after: \
2088 			   shift_dq[%u]=%d\n", i, shift_dq);
2089 		final_dq[i] = temp_dq_in_delay1 + shift_dq;
2090 		scc_mgr_set_dq_in_delay(p, final_dq[i]);
2091 		scc_mgr_load_dq(p);
2092 
2093 		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
2094 			   left_edge[i] - shift_dq + (-mid_min),
2095 			   right_edge[i] + shift_dq - (-mid_min));
2096 		/* To determine values for export structures */
2097 		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2098 			dq_margin = left_edge[i] - shift_dq + (-mid_min);
2099 
2100 		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2101 			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2102 	}
2103 
2104 	final_dqs = new_dqs;
2105 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2106 		final_dqs_en = start_dqs_en - mid_min;
2107 
2108 	/* Move DQS-en */
2109 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2110 		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
2111 		scc_mgr_load_dqs(read_group);
2112 	}
2113 
2114 	/* Move DQS */
2115 	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
2116 	scc_mgr_load_dqs(read_group);
2117 	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
2118 		   dqs_margin=%d", __func__, __LINE__,
2119 		   dq_margin, dqs_margin);
2120 
2121 	/*
2122 	 * Do not remove this line as it makes sure all of our decisions
2123 	 * have been applied. Apply the update bit.
2124 	 */
2125 	writel(0, &sdr_scc_mgr->update);
2126 
2127 	return (dq_margin >= 0) && (dqs_margin >= 0);
2128 }
2129 
2130 /**
2131  * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
2132  * @rw_group:	Read/Write Group
2133  * @phase:	DQ/DQS phase
2134  *
 * Because initially no communication can be reliably performed with the memory
2136  * device, the sequencer uses a guaranteed write mechanism to write data into
2137  * the memory device.
2138  */
2139 static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
2140 						 const u32 phase)
2141 {
2142 	int ret;
2143 
2144 	/* Set a particular DQ/DQS phase. */
2145 	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
2146 
2147 	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
2148 		   __func__, __LINE__, rw_group, phase);
2149 
2150 	/*
2151 	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2152 	 * Load up the patterns used by read calibration using the
2153 	 * current DQDQS phase.
2154 	 */
2155 	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2156 
2157 	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
2158 		return 0;
2159 
2160 	/*
2161 	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2162 	 * Back-to-Back reads of the patterns used for calibration.
2163 	 */
2164 	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
2165 	if (ret)
2166 		debug_cond(DLEVEL == 1,
2167 			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2168 			   __func__, __LINE__, rw_group, phase);
2169 	return ret;
2170 }
2171 
2172 /**
2173  * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2174  * @rw_group:	Read/Write Group
2175  * @test_bgn:	Rank at which the test begins
2176  *
2177  * DQS enable calibration ensures reliable capture of the DQ signal without
2178  * glitches on the DQS line.
2179  */
2180 static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
2181 						       const u32 test_bgn)
2182 {
2183 	/*
2184 	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
	 * DQS and DQS Enable Signal Relationships.
2186 	 */
2187 
	/* We start at zero, so have one less dq to divide among. */
2189 	const u32 delay_step = IO_IO_IN_DELAY_MAX /
2190 			       (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
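	/*
	 * With illustrative values IO_IO_IN_DELAY_MAX = 31 and
	 * RW_MGR_MEM_DQ_PER_READ_DQS = 8, delay_step = 31 / 7 = 4, so the
	 * loop below spreads the DQ input delays as 0, 4, 8, ... 28.
	 */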
2191 	int found;
2192 	u32 i, p, d, r;
2193 
2194 	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2195 
2196 	/* Try different dq_in_delays since the DQ path is shorter than DQS. */
2197 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2198 	     r += NUM_RANKS_PER_SHADOW_REG) {
2199 		for (i = 0, p = test_bgn, d = 0;
2200 		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
2201 		     i++, p++, d += delay_step) {
2202 			debug_cond(DLEVEL == 1,
2203 				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2204 				   __func__, __LINE__, rw_group, r, i, p, d);
2205 
2206 			scc_mgr_set_dq_in_delay(p, d);
2207 			scc_mgr_load_dq(p);
2208 		}
2209 
2210 		writel(0, &sdr_scc_mgr->update);
2211 	}
2212 
2213 	/*
2214 	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2215 	 * dq_in_delay values
2216 	 */
2217 	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
2218 
2219 	debug_cond(DLEVEL == 1,
		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
2221 		   __func__, __LINE__, rw_group, found);
2222 
2223 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2224 	     r += NUM_RANKS_PER_SHADOW_REG) {
2225 		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2226 		writel(0, &sdr_scc_mgr->update);
2227 	}
2228 
2229 	if (!found)
2230 		return -EINVAL;
2231 
	return 0;
}
2235 
2236 /**
2237  * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2238  * @rw_group:		Read/Write Group
2239  * @test_bgn:		Rank at which the test begins
2240  * @use_read_test:	Perform a read test
2241  * @update_fom:		Update FOM
2242  *
 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
2244  * within a group.
2245  */
2246 static int
2247 rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
2248 				      const int use_read_test,
				      const int update_fom)
{
2252 	int ret, grp_calibrated;
2253 	u32 rank_bgn, sr;
2254 
2255 	/*
2256 	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2257 	 * Read per-bit deskew can be done on a per shadow register basis.
2258 	 */
2259 	grp_calibrated = 1;
2260 	for (rank_bgn = 0, sr = 0;
2261 	     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2262 	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
2263 		/* Check if this set of ranks should be skipped entirely. */
2264 		if (param->skip_shadow_regs[sr])
2265 			continue;
2266 
2267 		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
2268 							rw_group, test_bgn,
2269 							use_read_test,
2270 							update_fom);
2271 		if (ret)
2272 			continue;
2273 
2274 		grp_calibrated = 0;
2275 	}
2276 
2277 	if (!grp_calibrated)
2278 		return -EIO;
2279 
2280 	return 0;
2281 }
2282 
2283 /**
2284  * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2285  * @rw_group:		Read/Write Group
2286  * @test_bgn:		Rank at which the test begins
2287  *
2288  * Stage 1: Calibrate the read valid prediction FIFO.
2289  *
2290  * This function implements UniPHY calibration Stage 1, as explained in
2291  * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2292  *
 * - read valid prediction will consist of finding:
 *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
 *   - DQS input phase and DQS input delay (DQ/DQS Centering)
 * - we also do a per-bit deskew on the DQ lines.
2297  */
2298 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
2299 {
2300 	uint32_t p, d;
2301 	uint32_t dtaps_per_ptap;
2302 	uint32_t failed_substage;
2303 
2304 	int ret;
2305 
2306 	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2307 
2308 	/* Update info for sims */
2309 	reg_file_set_group(rw_group);
2310 	reg_file_set_stage(CAL_STAGE_VFIFO);
2311 	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2312 
2313 	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2314 
2315 	/* USER Determine number of delay taps for each phase tap. */
2316 	dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
2317 				      IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
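	/*
	 * Illustrative numbers: IO_DELAY_PER_OPA_TAP = 312 ps and
	 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps would give
	 * dtaps_per_ptap = DIV_ROUND_UP(312, 25) - 1 = 12, so the d-loop
	 * below sweeps roughly one phase tap in steps of two.
	 */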
2318 
2319 	for (d = 0; d <= dtaps_per_ptap; d += 2) {
		/*
		 * In RLDRAMX we may be messing with the delay of pins in
		 * the same write rw_group but outside of the current read
		 * rw_group, but that's ok because we haven't calibrated
		 * the output side yet.
		 */
2326 		if (d > 0) {
2327 			scc_mgr_apply_group_all_out_delay_add_all_ranks(
2328 								rw_group, d);
2329 		}
2330 
2331 		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
2332 			/* 1) Guaranteed Write */
2333 			ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
2334 			if (ret)
2335 				break;
2336 
2337 			/* 2) DQS Enable Calibration */
2338 			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
2339 									  test_bgn);
2340 			if (ret) {
2341 				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2342 				continue;
2343 			}
2344 
2345 			/* 3) Centering DQ/DQS */
2346 			/*
2347 			 * If doing read after write calibration, do not update
2348 			 * FOM now. Do it then.
2349 			 */
2350 			ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
2351 								test_bgn, 1, 0);
2352 			if (ret) {
2353 				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
2354 				continue;
2355 			}
2356 
2357 			/* All done. */
2358 			goto cal_done_ok;
2359 		}
2360 	}
2361 
2362 	/* Calibration Stage 1 failed. */
2363 	set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
2364 	return 0;
2365 
2366 	/* Calibration Stage 1 completed OK. */
2367 cal_done_ok:
	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because the loop increases d even on a pass
	 * in the first case).
	 */
2373 	if (d > 2)
2374 		scc_mgr_zero_group(rw_group, 1);
2375 
2376 	return 1;
2377 }
2378 
2379 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */
2380 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2381 					       uint32_t test_bgn)
2382 {
2383 	uint32_t rank_bgn, sr;
2384 	uint32_t grp_calibrated;
2385 	uint32_t write_group;
2386 
2387 	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
2388 
2389 	/* update info for sims */
2390 
2391 	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2392 	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2393 
2394 	write_group = read_group;
2395 
2396 	/* update info for sims */
2397 	reg_file_set_group(read_group);
2398 
2399 	grp_calibrated = 1;
2400 	/* Read per-bit deskew can be done on a per shadow register basis */
2401 	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2402 		rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2403 		/* Determine if this set of ranks should be skipped entirely */
2404 		if (!param->skip_shadow_regs[sr]) {
			/* This is the last calibration round, update FOM here. */
2406 			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2407 								write_group,
2408 								read_group,
2409 								test_bgn, 0,
2410 								1)) {
2411 				grp_calibrated = 0;
2412 			}
2413 		}
2414 	}
2415 
	if (grp_calibrated == 0) {
2418 		set_failing_group_stage(write_group,
2419 					CAL_STAGE_VFIFO_AFTER_WRITES,
2420 					CAL_SUBSTAGE_VFIFO_CENTER);
2421 		return 0;
2422 	}
2423 
2424 	return 1;
2425 }
2426 
2427 /* Calibrate LFIFO to find smallest read latency */
2428 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2429 {
2430 	uint32_t found_one;
2431 	uint32_t bit_chk;
2432 
2433 	debug("%s:%d\n", __func__, __LINE__);
2434 
2435 	/* update info for sims */
2436 	reg_file_set_stage(CAL_STAGE_LFIFO);
2437 	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2438 
2439 	/* Load up the patterns used by read calibration for all ranks */
2440 	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2441 	found_one = 0;
2442 
2443 	do {
2444 		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2445 		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2446 			   __func__, __LINE__, gbl->curr_read_lat);
2447 
2448 		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
2449 							      NUM_READ_TESTS,
2450 							      PASS_ALL_BITS,
2451 							      &bit_chk, 1)) {
2452 			break;
2453 		}
2454 
2455 		found_one = 1;
		/* Reduce read latency and see if things are working correctly. */
2458 		gbl->curr_read_lat--;
2459 	} while (gbl->curr_read_lat > 0);
2460 
	/* Reset the FIFOs to get pointers to known state. */
	writel(0, &phy_mgr_cmd->fifo_reset);
2464 
2465 	if (found_one) {
2466 		/* add a fudge factor to the read latency that was determined */
2467 		gbl->curr_read_lat += 2;
2468 		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2469 		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
2470 			   read_lat=%u\n", __func__, __LINE__,
2471 			   gbl->curr_read_lat);
2472 		return 1;
2473 	} else {
2474 		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2475 					CAL_SUBSTAGE_READ_LATENCY);
2476 
2477 		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
2478 			   read_lat=%u\n", __func__, __LINE__,
2479 			   gbl->curr_read_lat);
2480 		return 0;
2481 	}
2482 }
2483 
2484 /*
 * Issue a write test command.
 * Two variants are provided: one just tests a write pattern while the
 * other tests datamask functionality.
2488  */
2489 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
2490 						  uint32_t test_dm)
2491 {
2492 	uint32_t mcc_instruction;
2493 	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
2494 		ENABLE_SUPER_QUICK_CALIBRATION);
2495 	uint32_t rw_wl_nop_cycles;
2496 	uint32_t addr;
2497 
2498 	/*
2499 	 * Set counter and jump addresses for the right
2500 	 * number of NOP cycles.
2501 	 * The number of supported NOP cycles can range from -1 to infinity
2502 	 * Three different cases are handled:
2503 	 *
2504 	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
2505 	 *    mechanism will be used to insert the right number of NOPs
2506 	 *
2507 	 * 2. For a number of NOP cycles equals to 0, the micro-instruction
2508 	 *    issuing the write command will jump straight to the
2509 	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
2510 	 *    data (for RLD), skipping
2511 	 *    the NOP micro-instruction all together
2512 	 *
2513 	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
2514 	 *    turned on in the same micro-instruction that issues the write
2515 	 *    command. Then we need
2516 	 *    to directly jump to the micro-instruction that sends out the data
2517 	 *
2518 	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
2519 	 *       (2 and 3). One jump-counter (0) is used to perform multiple
2520 	 *       write-read operations.
	 *       One counter is left to issue this command in
	 *       "multiple-group" mode.
2522 	 */
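	/*
	 * For example, rw_wl_nop_cycles = 4 takes the looping branch
	 * below: CNTR 3 is loaded with 4 - 1 = 3, so the NOP
	 * micro-instruction executes for the requested four cycles.
	 */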
2523 
2524 	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
2525 
2526 	if (rw_wl_nop_cycles == -1) {
2527 		/*
2528 		 * CNTR 2 - We want to execute the special write operation that
2529 		 * turns on DQS right away and then skip directly to the
2530 		 * instruction that sends out the data. We set the counter to a
2531 		 * large number so that the jump is always taken.
2532 		 */
2533 		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2534 
2535 		/* CNTR 3 - Not used */
2536 		if (test_dm) {
2537 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
2538 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
2539 			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2540 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2541 			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2542 		} else {
2543 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
2544 			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
2545 				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
2546 			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2547 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
2548 		}
2549 	} else if (rw_wl_nop_cycles == 0) {
2550 		/*
2551 		 * CNTR 2 - We want to skip the NOP operation and go straight
2552 		 * to the DQS enable instruction. We set the counter to a large
2553 		 * number so that the jump is always taken.
2554 		 */
2555 		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2556 
2557 		/* CNTR 3 - Not used */
2558 		if (test_dm) {
2559 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2560 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
2561 			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2562 		} else {
2563 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2564 			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
2565 				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
2566 		}
2567 	} else {
2568 		/*
2569 		 * CNTR 2 - In this case we want to execute the next instruction
2570 		 * and NOT take the jump. So we set the counter to 0. The jump
2571 		 * address doesn't count.
2572 		 */
2573 		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
2574 		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2575 
2576 		/*
2577 		 * CNTR 3 - Set the nop counter to the number of cycles we
2578 		 * need to loop for, minus 1.
2579 		 */
2580 		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
2581 		if (test_dm) {
2582 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2583 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2584 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
2585 		} else {
2586 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2587 			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2588 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
2589 		}
2590 	}
2591 
2592 	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
2593 		  RW_MGR_RESET_READ_DATAPATH_OFFSET);
2594 
2595 	if (quick_write_mode)
2596 		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
2597 	else
2598 		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
2599 
2600 	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
2601 
2602 	/*
2603 	 * CNTR 1 - This is used to ensure enough time elapses
2604 	 * for read data to come back.
2605 	 */
2606 	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
2607 
2608 	if (test_dm) {
2609 		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
2610 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
2611 	} else {
2612 		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
2613 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
2614 	}
2615 
2616 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
2617 	writel(mcc_instruction, addr + (group << 2));
2618 }
2619 
2620 /* Test writes, can check for a single bit pass or multiple bit pass */
2621 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
2622 	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
2623 	uint32_t *bit_chk, uint32_t all_ranks)
2624 {
2625 	uint32_t r;
2626 	uint32_t correct_mask_vg;
2627 	uint32_t tmp_bit_chk;
2628 	uint32_t vg;
2629 	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
2630 		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
2631 	uint32_t addr_rw_mgr;
2632 	uint32_t base_rw_mgr;
2633 
2634 	*bit_chk = param->write_correct_mask;
2635 	correct_mask_vg = param->write_correct_mask_vg;
2636 
2637 	for (r = rank_bgn; r < rank_end; r++) {
2638 		if (param->skip_ranks[r]) {
2639 			/* request to skip the rank */
2640 			continue;
2641 		}
2642 
2643 		/* set rank */
2644 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
2645 
2646 		tmp_bit_chk = 0;
2647 		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
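		/*
		 * Walk the virtual groups from last to first; each pass
		 * shifts the accumulated result left by one group's worth
		 * of DQ bits and ORs in this group's per-bit pass mask
		 * (bits clear in the RW manager status count as passing),
		 * so tmp_bit_chk ends up with one pass/fail bit per DQ
		 * pin of the write group.
		 */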
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;; vg--) {
2649 			/* reset the fifos to get pointers to known state */
2650 			writel(0, &phy_mgr_cmd->fifo_reset);
2651 
2652 			tmp_bit_chk = tmp_bit_chk <<
2653 				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
2654 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
2655 			rw_mgr_mem_calibrate_write_test_issue(write_group *
2656 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
2657 				use_dm);
2658 
2659 			base_rw_mgr = readl(addr_rw_mgr);
2660 			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
2661 			if (vg == 0)
2662 				break;
2663 		}
2664 		*bit_chk &= tmp_bit_chk;
2665 	}
2666 
2667 	if (all_correct) {
2668 		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2669 		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
2670 			   %u => %lu", write_group, use_dm,
2671 			   *bit_chk, param->write_correct_mask,
2672 			   (long unsigned int)(*bit_chk ==
2673 			   param->write_correct_mask));
2674 		return *bit_chk == param->write_correct_mask;
2675 	} else {
2676 		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2677 		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
2678 		       write_group, use_dm, *bit_chk);
2679 		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
2680 			(long unsigned int)(*bit_chk != 0));
2681 		return *bit_chk != 0x00;
2682 	}
2683 }
2684 
2685 /*
 * Center all windows. Do per-bit deskew to possibly increase the size of
 * certain windows.
2688  */
2689 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
2690 	uint32_t write_group, uint32_t test_bgn)
2691 {
2692 	uint32_t i, p, min_index;
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	int32_t d;
2700 	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2701 	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2702 	int32_t mid;
2703 	int32_t mid_min, orig_mid_min;
2704 	int32_t new_dqs, start_dqs, shift_dq;
2705 	int32_t dq_margin, dqs_margin, dm_margin;
2706 	uint32_t stop;
2707 	uint32_t temp_dq_out1_delay;
2708 	uint32_t addr;
2709 
2710 	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
2711 
2712 	dm_margin = 0;
2713 
2714 	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2715 	start_dqs = readl(addr +
2716 			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2717 
2718 	/* per-bit deskew */
2719 
2720 	/*
2721 	 * set the left and right edge of each bit to an illegal value
2722 	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2723 	 */
2724 	sticky_bit_chk = 0;
2725 	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2726 		left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
2727 		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2728 	}
2729 
2730 	/* Search for the left edge of the window for each bit */
2731 	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
2732 		scc_mgr_apply_group_dq_out1_delay(write_group, d);
2733 
2734 		writel(0, &sdr_scc_mgr->update);
2735 
		/*
		 * Stop searching when the write test doesn't pass AND when
		 * we've seen a passing write on every bit.
		 */
2740 		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2741 			0, PASS_ONE_BIT, &bit_chk, 0);
2742 		sticky_bit_chk = sticky_bit_chk | bit_chk;
2743 		stop = stop && (sticky_bit_chk == param->write_correct_mask);
2744 		debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
2745 			   == %u && %u [bit_chk= %u ]\n",
2746 			d, sticky_bit_chk, param->write_correct_mask,
2747 			stop, bit_chk);
2748 
2749 		if (stop == 1) {
2750 			break;
2751 		} else {
2752 			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2753 				if (bit_chk & 1) {
2754 					/*
2755 					 * Remember a passing test as the
2756 					 * left_edge.
2757 					 */
2758 					left_edge[i] = d;
2759 				} else {
2760 					/*
2761 					 * If a left edge has not been seen
2762 					 * yet, then a future passing test will
2763 					 * mark this edge as the right edge.
2764 					 */
2765 					if (left_edge[i] ==
2766 						IO_IO_OUT1_DELAY_MAX + 1) {
2767 						right_edge[i] = -(d + 1);
2768 					}
2769 				}
2770 				debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
2771 				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2772 					   (int)(bit_chk & 1), i, left_edge[i]);
2773 				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2774 				       right_edge[i]);
2775 				bit_chk = bit_chk >> 1;
2776 			}
2777 		}
2778 	}
2779 
2780 	/* Reset DQ delay chains to 0 */
2781 	scc_mgr_apply_group_dq_out1_delay(0);
2782 	sticky_bit_chk = 0;
2783 	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
2784 		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2785 			   %d right_edge[%u]: %d\n", __func__, __LINE__,
2786 			   i, left_edge[i], i, right_edge[i]);
2787 
2788 		/*
2789 		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
2791 		 * Reset it to the illegal value.
2792 		 */
2793 		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
2794 		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
2795 			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2796 			debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
2797 				   right_edge[%u]: %d\n", __func__, __LINE__,
2798 				   i, right_edge[i]);
2799 		}
2800 
2801 		/*
2802 		 * Reset sticky bit (except for bits where we have
2803 		 * seen the left edge).
2804 		 */
2805 		sticky_bit_chk = sticky_bit_chk << 1;
2806 		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
2807 			sticky_bit_chk = sticky_bit_chk | 1;
2808 
2809 		if (i == 0)
2810 			break;
2811 	}
2812 
2813 	/* Search for the right edge of the window for each bit */
2814 	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
2815 		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2816 							d + start_dqs);
2817 
2818 		writel(0, &sdr_scc_mgr->update);
2819 
		/*
		 * Stop searching when the write test doesn't pass AND when
		 * we've seen a passing write on every bit.
		 */
2824 		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2825 			0, PASS_ONE_BIT, &bit_chk, 0);
2826 
2827 		sticky_bit_chk = sticky_bit_chk | bit_chk;
2828 		stop = stop && (sticky_bit_chk == param->write_correct_mask);
2829 
2830 		debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
2831 			   %u && %u\n", d, sticky_bit_chk,
2832 			   param->write_correct_mask, stop);
2833 
2834 		if (stop == 1) {
2835 			if (d == 0) {
2836 				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
2837 					i++) {
					/*
					 * d = 0 failed, but it passed when
					 * testing the left edge, so it must
					 * be marginal, set it to -1.
					 */
2841 					if (right_edge[i] ==
2842 						IO_IO_OUT1_DELAY_MAX + 1 &&
2843 						left_edge[i] !=
2844 						IO_IO_OUT1_DELAY_MAX + 1) {
2845 						right_edge[i] = -1;
2846 					}
2847 				}
2848 			}
2849 			break;
2850 		} else {
2851 			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2852 				if (bit_chk & 1) {
2853 					/*
2854 					 * Remember a passing test as
2855 					 * the right_edge.
2856 					 */
2857 					right_edge[i] = d;
2858 				} else {
2859 					if (d != 0) {
2860 						/*
2861 						 * If a right edge has not
2862 						 * been seen yet, then a future
2863 						 * passing test will mark this
2864 						 * edge as the left edge.
2865 						 */
2866 						if (right_edge[i] ==
2867 						    IO_IO_OUT1_DELAY_MAX + 1)
2868 							left_edge[i] = -(d + 1);
2869 					} else {
2870 						/*
2871 						 * d = 0 failed, but it passed
2872 						 * when testing the left edge,
2873 						 * so it must be marginal, set
2874 						 * it to -1.
2875 						 */
2876 						if (right_edge[i] ==
2877 						    IO_IO_OUT1_DELAY_MAX + 1 &&
2878 						    left_edge[i] !=
2879 						    IO_IO_OUT1_DELAY_MAX + 1)
2880 							right_edge[i] = -1;
2881 						/*
2882 						 * If a right edge has not been
2883 						 * seen yet, then a future
2884 						 * passing test will mark this
2885 						 * edge as the left edge.
2886 						 */
2887 						else if (right_edge[i] ==
2888 							IO_IO_OUT1_DELAY_MAX +
2889 							1)
2890 							left_edge[i] = -(d + 1);
2891 					}
2892 				}
2893 				debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
2894 				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2895 					   (int)(bit_chk & 1), i, left_edge[i]);
2896 				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2897 					   right_edge[i]);
2898 				bit_chk = bit_chk >> 1;
2899 			}
2900 		}
2901 	}
2902 
2903 	/* Check that all bits have a window */
2904 	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2905 		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2906 			   %d right_edge[%u]: %d", __func__, __LINE__,
2907 			   i, left_edge[i], i, right_edge[i]);
2908 		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
2909 		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
2910 			set_failing_group_stage(test_bgn + i,
2911 						CAL_STAGE_WRITES,
2912 						CAL_SUBSTAGE_WRITES_CENTER);
2913 			return 0;
2914 		}
2915 	}
2916 
2917 	/* Find middle of window for each DQ bit */
2918 	mid_min = left_edge[0] - right_edge[0];
2919 	min_index = 0;
2920 	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2921 		mid = left_edge[i] - right_edge[i];
2922 		if (mid < mid_min) {
2923 			mid_min = mid;
2924 			min_index = i;
2925 		}
2926 	}
2927 
2928 	/*
2929 	 * -mid_min/2 represents the amount that we need to move DQS.
2930 	 * If mid_min is odd and positive we'll need to add one to
2931 	 * make sure the rounding in further calculations is correct
2932 	 * (always bias to the right), so just add 1 for all positive values.
2933 	 */
2934 	if (mid_min > 0)
2935 		mid_min++;
2936 	mid_min = mid_min / 2;
2937 	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
2938 		   __LINE__, mid_min);
2939 
2940 	/* Determine the amount we can change DQS (which is -mid_min) */
2941 	orig_mid_min = mid_min;
2942 	new_dqs = start_dqs;
2943 	mid_min = 0;
2944 	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
2945 		   mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
2946 	/* Initialize data for export structures */
2947 	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
2948 	dq_margin  = IO_IO_OUT1_DELAY_MAX + 1;
2949 
2950 	/* add delay to bring centre of all DQ windows to the same "level" */
2951 	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
2952 		/* Use values before divide by 2 to reduce round off error */
2953 		shift_dq = (left_edge[i] - right_edge[i] -
2954 			(left_edge[min_index] - right_edge[min_index]))/2  +
2955 		(orig_mid_min - mid_min);
2956 
2957 		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
2958 			   [%u]=%d\n", __func__, __LINE__, i, shift_dq);
2959 
2960 		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2961 		temp_dq_out1_delay = readl(addr + (i << 2));
2962 		if (shift_dq + (int32_t)temp_dq_out1_delay >
2963 			(int32_t)IO_IO_OUT1_DELAY_MAX) {
2964 			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
2965 		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
2966 			shift_dq = -(int32_t)temp_dq_out1_delay;
2967 		}
2968 		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
2969 			   i, shift_dq);
2970 		scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
2971 		scc_mgr_load_dq(i);
2972 
2973 		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
2974 			   left_edge[i] - shift_dq + (-mid_min),
2975 			   right_edge[i] + shift_dq - (-mid_min));
2976 		/* To determine values for export structures */
2977 		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2978 			dq_margin = left_edge[i] - shift_dq + (-mid_min);
2979 
2980 		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2981 			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2982 	}
2983 
2984 	/* Move DQS */
2985 	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
2986 	writel(0, &sdr_scc_mgr->update);
2987 
2988 	/* Centre DM */
2989 	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
2990 
2991 	/*
2992 	 * set the left and right edge of each bit to an illegal value,
2993 	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
2994 	 */
2995 	left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
2996 	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2997 	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2998 	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2999 	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
3000 	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
3001 	int32_t win_best = 0;
3002 
	/* Search for the window (or part of it) with the DM shift. */
3004 	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
3005 		scc_mgr_apply_group_dm_out1_delay(d);
3006 		writel(0, &sdr_scc_mgr->update);
3007 
3008 		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
3009 						    PASS_ALL_BITS, &bit_chk,
3010 						    0)) {
			/* USER Set current end of the window. */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been
			 * seen, this is our current start of the DM window.
			 */
3017 			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
3018 				bgn_curr = -d;
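			/*
			 * d sweeps downward, so -d grows: bgn_curr records
			 * the first (most negative) passing point and
			 * end_curr the latest. E.g. passes from d = 7 down
			 * to d = 2 give the window [-7, -2], size 6.
			 */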
3019 
			/*
			 * If the current window is bigger than the best
			 * seen so far, set the best seen to be the
			 * current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
3026 				bgn_best = bgn_curr;
3027 				end_best = end_curr;
3028 			}
		} else {
			/* We just saw a failing test. Reset temp edge. */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}

3037 	/* Reset DM delay chains to 0 */
3038 	scc_mgr_apply_group_dm_out1_delay(0);
3039 
	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so, we need to continue the search by shifting DQS; otherwise
	 * the DQS search begins as a new search.
	 */
3044 	if (end_curr != 0) {
3045 		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3046 		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3047 	}
3048 
	/* Search for the window (or part of it) with DQS shifts. */
3050 	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: This only shifts DQS, so we may be limiting
		 * ourselves to the width of DQ unnecessarily.
		 */
3055 		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
3056 							d + new_dqs);
3057 
3058 		writel(0, &sdr_scc_mgr->update);
3059 		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
3060 						    PASS_ALL_BITS, &bit_chk,
3061 						    0)) {
			/* USER Set current end of the window. */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been
			 * seen, this is our current beginning of the DM
			 * window.
			 */
3068 			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
3069 				bgn_curr = d;
3070 
			/*
			 * If the current window is bigger than the best
			 * seen so far, set the best seen to be the
			 * current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
3077 				bgn_best = bgn_curr;
3078 				end_best = end_curr;
3079 			}
3080 		} else {
3081 			/* We just saw a failing test. Reset temp edge */
3082 			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3083 			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3084 
			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window seen
			 * so far, we can exit.
			 */
			if ((win_best - 1) >
			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d))
				break;
		}
	}
3094 
	/* Assign left and right edge for calibration and reporting. */
	left_edge[0] = -1 * bgn_best;
3097 	right_edge[0] = end_best;
3098 
3099 	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
3100 		   __LINE__, left_edge[0], right_edge[0]);
3101 
3102 	/* Move DQS (back to orig) */
3103 	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3104 
3105 	/* Move DM */
3106 
3107 	/* Find middle of window for the DM bit */
3108 	mid = (left_edge[0] - right_edge[0]) / 2;
3109 
3110 	/* only move right, since we are not moving DQS/DQ */
3111 	if (mid < 0)
3112 		mid = 0;
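	/*
	 * Example: bgn_best = -3 and end_best = 5 give left_edge[0] = 3
	 * and right_edge[0] = 5, so mid = (3 - 5) / 2 = -1, which the
	 * clamp above raises to 0 since DM may only be delayed further.
	 */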
3113 
	/* dm_margin should fail if we never find a window. */
3115 	if (win_best == 0)
3116 		dm_margin = -1;
3117 	else
3118 		dm_margin = left_edge[0] - mid;
3119 
3120 	scc_mgr_apply_group_dm_out1_delay(mid);
3121 	writel(0, &sdr_scc_mgr->update);
3122 
3123 	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
3124 		   dm_margin=%d\n", __func__, __LINE__, left_edge[0],
3125 		   right_edge[0], mid, dm_margin);
3126 	/* Export values */
3127 	gbl->fom_out += dq_margin + dqs_margin;
3128 
3129 	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
3130 		   dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
3131 		   dq_margin, dqs_margin, dm_margin);
3132 
3133 	/*
3134 	 * Do not remove this line as it makes sure all of our
3135 	 * decisions have been applied.
3136 	 */
3137 	writel(0, &sdr_scc_mgr->update);
3138 	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
3139 }
3140 
3141 /* calibrate the write operations */
3142 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
3143 	uint32_t test_bgn)
3144 {
3145 	/* update info for sims */
3146 	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
3147 
3148 	reg_file_set_stage(CAL_STAGE_WRITES);
3149 	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3150 
3151 	reg_file_set_group(g);
3152 
3153 	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
3154 		set_failing_group_stage(g, CAL_STAGE_WRITES,
3155 					CAL_SUBSTAGE_WRITES_CENTER);
3156 		return 0;
3157 	}
3158 
3159 	return 1;
3160 }
3161 
3162 /**
3163  * mem_precharge_and_activate() - Precharge all banks and activate
3164  *
3165  * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3166  */
3167 static void mem_precharge_and_activate(void)
3168 {
3169 	int r;
3170 
3171 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3172 		/* Test if the rank should be skipped. */
3173 		if (param->skip_ranks[r])
3174 			continue;
3175 
3176 		/* Set rank. */
3177 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3178 
3179 		/* Precharge all banks. */
3180 		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3181 					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3182 
3183 		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3184 		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3185 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
3186 
3187 		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3188 		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3189 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
3190 
3191 		/* Activate rows. */
3192 		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3193 						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3194 	}
3195 }
3196 
3197 /**
3198  * mem_init_latency() - Configure memory RLAT and WLAT settings
3199  *
3200  * Configure memory RLAT and WLAT parameters.
3201  */
3202 static void mem_init_latency(void)
3203 {
3204 	/*
3205 	 * For AV/CV, LFIFO is hardened and always runs at full rate
3206 	 * so max latency in AFI clocks, used here, is correspondingly
3207 	 * smaller.
3208 	 */
3209 	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
3210 	u32 rlat, wlat;
3211 
3212 	debug("%s:%d\n", __func__, __LINE__);
3213 
3214 	/*
3215 	 * Read in write latency.
3216 	 * WL for Hard PHY does not include additive latency.
3217 	 */
3218 	wlat = readl(&data_mgr->t_wl_add);
3219 	wlat += readl(&data_mgr->mem_t_add);
3220 
3221 	gbl->rw_wl_nop_cycles = wlat - 1;
3222 
	/* Read in read latency. */
3224 	rlat = readl(&data_mgr->t_rl_add);
3225 
3226 	/* Set a pretty high read latency initially. */
3227 	gbl->curr_read_lat = rlat + 16;
3228 	if (gbl->curr_read_lat > max_latency)
3229 		gbl->curr_read_lat = max_latency;
3230 
3231 	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3232 
3233 	/* Advertise write latency. */
3234 	writel(wlat, &phy_mgr_cfg->afi_wlat);
3235 }
3236 
3237 /**
 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3239  *
3240  * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3241  */
3242 static void mem_skip_calibrate(void)
3243 {
3244 	uint32_t vfifo_offset;
3245 	uint32_t i, j, r;
3246 
3247 	debug("%s:%d\n", __func__, __LINE__);
3248 	/* Need to update every shadow register set used by the interface */
3249 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3250 	     r += NUM_RANKS_PER_SHADOW_REG) {
3251 		/*
3252 		 * Set output phase alignment settings appropriate for
3253 		 * skip calibration.
3254 		 */
3255 		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3256 			scc_mgr_set_dqs_en_phase(i, 0);
3257 #if IO_DLL_CHAIN_LENGTH == 6
3258 			scc_mgr_set_dqdqs_output_phase(i, 6);
3259 #else
3260 			scc_mgr_set_dqdqs_output_phase(i, 7);
3261 #endif
3262 			/*
3263 			 * Case:33398
3264 			 *
3265 			 * Write data arrives to the I/O two cycles before write
3266 			 * latency is reached (720 deg).
3267 			 *   -> due to bit-slip in a/c bus
3268 			 *   -> to allow board skew where dqs is longer than ck
3269 			 *      -> how often can this happen!?
3270 			 *      -> can claim back some ptaps for high freq
			 *       support if we can relax this, but I digress...
3272 			 *
3273 			 * The write_clk leads mem_ck by 90 deg
3274 			 * The minimum ptap of the OPA is 180 deg
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
3276 			 * The write_clk is always delayed by 2 ptaps
3277 			 *
3278 			 * Hence, to make DQS aligned to CK, we need to delay
3279 			 * DQS by:
3280 			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3281 			 *
3282 			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
3284 			 *
3285 			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3286 			 */
3287 			scc_mgr_set_dqdqs_output_phase(i,
3288 					1.25 * IO_DLL_CHAIN_LENGTH - 2);
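			/*
			 * Sanity check of the arithmetic above:
			 * (720 - 90 - 180 - 2 * (360 / L)) / (360 / L)
			 *   = 450 * L / 360 - 2 = 1.25 * L - 2,
			 * so e.g. IO_DLL_CHAIN_LENGTH = 8 yields an
			 * output phase of 8 ptaps.
			 */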
3289 		}
3290 		writel(0xff, &sdr_scc_mgr->dqs_ena);
3291 		writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3292 
3293 		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3294 			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3295 				  SCC_MGR_GROUP_COUNTER_OFFSET);
3296 		}
3297 		writel(0xff, &sdr_scc_mgr->dq_ena);
3298 		writel(0xff, &sdr_scc_mgr->dm_ena);
3299 		writel(0, &sdr_scc_mgr->update);
3300 	}
3301 
3302 	/* Compensate for simulation model behaviour */
3303 	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3304 		scc_mgr_set_dqs_bus_in_delay(i, 10);
3305 		scc_mgr_load_dqs(i);
3306 	}
3307 	writel(0, &sdr_scc_mgr->update);
3308 
3309 	/*
3310 	 * ArriaV has hard FIFOs that can only be initialized by incrementing
3311 	 * in sequencer.
3312 	 */
3313 	vfifo_offset = CALIB_VFIFO_OFFSET;
3314 	for (j = 0; j < vfifo_offset; j++)
3315 		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3316 	writel(0, &phy_mgr_cmd->fifo_reset);
3317 
3318 	/*
3319 	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3320 	 * setting from generation-time constant.
3321 	 */
3322 	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3323 	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3324 }
3325 
3326 /**
3327  * mem_calibrate() - Memory calibration entry point.
3328  *
3329  * Perform memory calibration.
3330  */
3331 static uint32_t mem_calibrate(void)
3332 {
3333 	uint32_t i;
3334 	uint32_t rank_bgn, sr;
3335 	uint32_t write_group, write_test_bgn;
3336 	uint32_t read_group, read_test_bgn;
3337 	uint32_t run_groups, current_run;
3338 	uint32_t failing_groups = 0;
3339 	uint32_t group_failed = 0;
3340 
3341 	const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
3342 				RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
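	/*
	 * E.g. with 8 read DQS groups and 4 write DQS groups,
	 * rwdqs_ratio = 2 and each write group below walks two read
	 * groups.
	 */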
3343 
3344 	debug("%s:%d\n", __func__, __LINE__);
3345 
3346 	/* Initialize the data settings */
3347 	gbl->error_substage = CAL_SUBSTAGE_NIL;
3348 	gbl->error_stage = CAL_STAGE_NIL;
3349 	gbl->error_group = 0xff;
3350 	gbl->fom_in = 0;
3351 	gbl->fom_out = 0;
3352 
3353 	/* Initialize WLAT and RLAT. */
3354 	mem_init_latency();
3355 
3356 	/* Initialize bit slips. */
3357 	mem_precharge_and_activate();
3358 
3359 	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3360 		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3361 			  SCC_MGR_GROUP_COUNTER_OFFSET);
3362 		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
3363 		if (i == 0)
3364 			scc_mgr_set_hhp_extras();
3365 
3366 		scc_set_bypass_mode(i);
3367 	}
3368 
3369 	/* Calibration is skipped. */
3370 	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3371 		/*
3372 		 * Set VFIFO and LFIFO to instant-on settings in skip
3373 		 * calibration mode.
3374 		 */
3375 		mem_skip_calibrate();
3376 
3377 		/*
3378 		 * Do not remove this line as it makes sure all of our
3379 		 * decisions have been applied.
3380 		 */
3381 		writel(0, &sdr_scc_mgr->update);
3382 		return 1;
3383 	}
3384 
3385 	/* Calibration is not skipped. */
3386 	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3387 		/*
3388 		 * Zero all delay chain/phase settings for all
3389 		 * groups and all shadow register sets.
3390 		 */
3391 		scc_mgr_zero_all();
3392 
3393 		run_groups = ~param->skip_groups;
3394 
3395 		for (write_group = 0, write_test_bgn = 0; write_group
3396 			< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3397 			write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3398 
3399 			/* Initialize the group failure */
3400 			group_failed = 0;
3401 
3402 			current_run = run_groups & ((1 <<
3403 				RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3404 			run_groups = run_groups >>
3405 				RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3406 
3407 			if (current_run == 0)
3408 				continue;
3409 
3410 			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3411 					    SCC_MGR_GROUP_COUNTER_OFFSET);
3412 			scc_mgr_zero_group(write_group, 0);
3413 
3414 			for (read_group = write_group * rwdqs_ratio,
3415 			     read_test_bgn = 0;
3416 			     read_group < (write_group + 1) * rwdqs_ratio;
3417 			     read_group++,
3418 			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3419 				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3420 					continue;
3421 
3422 				/* Calibrate the VFIFO */
3423 				if (rw_mgr_mem_calibrate_vfifo(read_group,
3424 							       read_test_bgn))
3425 					continue;
3426 
3427 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3428 					return 0;
3429 
3430 				/* The group failed; log it and keep sweeping. */
3431 				goto grp_failed;
3432 			}
3433 
3434 			/* Calibrate the output side */
3435 			for (rank_bgn = 0, sr = 0;
3436 			     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
3437 			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3438 				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3439 					continue;
3440 
3441 				/* Delay sweeps are not needed in quick mode. */
3442 				if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
3443 					continue;
3444 
3445 				/*
3446 				 * Determine if this set of ranks
3447 				 * should be skipped entirely.
3448 				 */
3449 				if (param->skip_shadow_regs[sr])
3450 					continue;
3451 
3452 				/* Calibrate WRITEs */
3453 				if (rw_mgr_mem_calibrate_writes(rank_bgn,
3454 						write_group, write_test_bgn))
3455 					continue;
3456 
3457 				group_failed = 1;
3458 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3459 					return 0;
3460 			}
3461 
3462 			/* The write side failed; log the group and keep sweeping. */
3463 			if (group_failed)
3464 				goto grp_failed;
3465 
3466 			for (read_group = write_group * rwdqs_ratio,
3467 			     read_test_bgn = 0;
3468 			     read_group < (write_group + 1) * rwdqs_ratio;
3469 			     read_group++,
3470 			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3471 				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3472 					continue;
3473 
3474 				if (rw_mgr_mem_calibrate_vfifo_end(read_group,
3475 								read_test_bgn))
3476 					continue;
3477 
3478 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3479 					return 0;
3480 
3481 				/* The group failed; log it and keep sweeping. */
3482 				goto grp_failed;
3483 			}
3484 
3485 			/* No group failed, continue as usual. */
3486 			continue;
3487 
3488 grp_failed:		/* A group failed, increment the counter. */
3489 			failing_groups++;
3490 		}
3491 
3492 		/*
3493 		 * If any group failed, the whole calibration has
3494 		 * failed.
3495 		 */
3496 		if (failing_groups != 0)
3497 			return 0;
3498 
3499 		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3500 			continue;
3501 
3502 		/*
3503 		 * If we're skipping groups as part of debug,
3504 		 * don't calibrate LFIFO.
3505 		 */
3506 		if (param->skip_groups != 0)
3507 			continue;
3508 
3509 		/* Calibrate the LFIFO */
3510 		if (!rw_mgr_mem_calibrate_lfifo())
3511 			return 0;
3512 	}
3513 
3514 	/*
3515 	 * Do not remove this line as it makes sure all of our decisions
3516 	 * have been applied.
3517 	 */
3518 	writel(0, &sdr_scc_mgr->update);
3519 	return 1;
3520 }
3521 
3522 /**
3523  * run_mem_calibrate() - Perform memory calibration
3524  *
3525  * Trigger the entire memory calibration; return 1 on pass, 0 on fail.
3526  */
3527 static int run_mem_calibrate(void)
3528 {
3529 	int pass;
3530 
3531 	debug("%s:%d\n", __func__, __LINE__);
3532 
3533 	/* Reset pass/fail status shown on afi_cal_success/fail */
3534 	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3535 
3536 	/* Stop the tracking manager (ctrl_cfg bit 22). */
3537 	clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3538 
3539 	phy_mgr_initialize();
3540 	rw_mgr_mem_initialize();
3541 
3542 	/* Perform the actual memory calibration. */
3543 	pass = mem_calibrate();
3544 
3545 	mem_precharge_and_activate();
3546 	writel(0, &phy_mgr_cmd->fifo_reset);
3547 
3548 	/* Handoff. */
3549 	rw_mgr_mem_handoff();
3550 	/*
3551 	 * In the Hard PHY this is a 2-bit control:
3552 	 * bit 0: AFI Mux Select
3553 	 * bit 1: DDIO Mux Select; writing 0x2 selects the DDIO mux.
3554 	 */
3555 	writel(0x2, &phy_mgr_cfg->mux_sel);
3556 
3557 	/* Restart the tracking manager (ctrl_cfg bit 22). */
3558 	setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3559 
3560 	return pass;
3561 }
3562 
3563 /**
3564  * debug_mem_calibrate() - Report result of memory calibration
3565  * @pass:	Value indicating whether calibration passed or failed
3566  *
3567  * This function reports the results of the memory calibration
3568  * and writes debug information into the register file.
3569  */
3570 static void debug_mem_calibrate(int pass)
3571 {
3572 	uint32_t debug_info;
3573 
3574 	if (pass) {
3575 		printf("%s: CALIBRATION PASSED\n", __FILE__);
3576 
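		/*
		 * Halve the figures of merit and clamp them to fit the
		 * 8-bit fields packed into the register file below.
		 */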
3577 		gbl->fom_in /= 2;
3578 		gbl->fom_out /= 2;
3579 
3580 		if (gbl->fom_in > 0xff)
3581 			gbl->fom_in = 0xff;
3582 
3583 		if (gbl->fom_out > 0xff)
3584 			gbl->fom_out = 0xff;
3585 
3586 		/* Update the FOM in the register file */
3587 		debug_info = gbl->fom_in;
3588 		debug_info |= gbl->fom_out << 8;
3589 		writel(debug_info, &sdr_reg_file->fom);
3590 
3591 		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3592 		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3593 	} else {
3594 		printf("%s: CALIBRATION FAILED\n", __FILE__);
3595 
3596 		/* Record the failing group/stage in the register file. */
3597 		debug_info = gbl->error_stage;
3598 		debug_info |= gbl->error_substage << 8;
3599 		debug_info |= gbl->error_group << 16;
3600 
3601 		writel(debug_info, &sdr_reg_file->failing_stage);
3602 		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3603 		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3609 	}
3610 
3611 	printf("%s: Calibration complete\n", __FILE__);
3612 }
3613 
3614 /**
3615  * hc_initialize_rom_data() - Initialize ROM data
3616  *
3617  * Initialize ROM data.
3618  */
3619 static void hc_initialize_rom_data(void)
3620 {
3621 	u32 i, addr;
3622 
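	/* Each ROM entry is a 32-bit word, hence the (i << 2) byte offset. */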
3623 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3624 	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3625 		writel(inst_rom_init[i], addr + (i << 2));
3626 
3627 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3628 	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3629 		writel(ac_rom_init[i], addr + (i << 2));
3630 }
3631 
3632 /**
3633  * initialize_reg_file() - Initialize SDR register file
3634  *
3635  * Initialize SDR register file.
3636  */
3637 static void initialize_reg_file(void)
3638 {
3639 	/* Initialize the register file with the correct data */
3640 	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3641 	writel(0, &sdr_reg_file->debug_data_addr);
3642 	writel(0, &sdr_reg_file->cur_stage);
3643 	writel(0, &sdr_reg_file->fom);
3644 	writel(0, &sdr_reg_file->failing_stage);
3645 	writel(0, &sdr_reg_file->debug1);
3646 	writel(0, &sdr_reg_file->debug2);
3647 }
3648 
3649 /**
3650  * initialize_hps_phy() - Initialize HPS PHY
3651  *
3652  * Initialize HPS PHY.
3653  */
3654 static void initialize_hps_phy(void)
3655 {
3656 	uint32_t reg;
3657 	/*
3658 	 * Tracking also gets configured here because it's in the
3659 	 * same register.
3660 	 */
3661 	uint32_t trk_sample_count = 7500;
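	/* Matches the sample count written by initialize_tracking(). */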
3662 	/*
3663 	 * Format: number of outer loops in the 16 MSB, sample count
3664 	 * in the 16 LSB.
3665 	 */
3666 	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
3667 
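	/*
	 * Build phy_ctrl0: delay-chain enables, LPDDR disable and
	 * intrinsic read-latency select.
	 */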
3668 	reg = 0;
3669 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3670 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3671 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3672 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3673 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3674 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3675 	/*
3676 	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3677 	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3678 	 */
3679 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3680 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3681 		trk_sample_count);
3682 	writel(reg, &sdr_ctrl->phy_ctrl0);
3683 
3684 	reg = 0;
3685 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3686 		trk_sample_count >>
3687 		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3688 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3689 		trk_long_idle_sample_count);
3690 	writel(reg, &sdr_ctrl->phy_ctrl1);
3691 
3692 	reg = 0;
3693 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3694 		trk_long_idle_sample_count >>
3695 		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3696 	writel(reg, &sdr_ctrl->phy_ctrl2);
3697 }
3698 
3699 /**
3700  * initialize_tracking() - Initialize tracking
3701  *
3702  * Initialize the register file with usable initial data.
3703  */
3704 static void initialize_tracking(void)
3705 {
3706 	/*
3707 	 * Initialize the register file with the correct data.
3708 	 * Compute usable version of value in case we skip full
3709 	 * computation later.
3710 	 */
3711 	writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
3712 	       &sdr_reg_file->dtaps_per_ptap);
3713 
3714 	/* trk_sample_count */
3715 	writel(7500, &sdr_reg_file->trk_sample_count);
3716 
3717 	/* longidle: outer-loop count [31:16], sample count [15:0] */
3718 	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3719 
3720 	/*
3721 	 * longidle sample count [31:24]
3722 	 * trfc, worst case of 933 MHz 4Gb [23:16]
3723 	 * trcd, worst case [15:8]
3724 	 * vfifo wait [7:0]
3725 	 */
3726 	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3727 	       &sdr_reg_file->delays);
3728 
3729 	/* mux delay */
3730 	writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
3731 	       (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
3732 	       &sdr_reg_file->trk_rw_mgr_addr);
3733 
3734 	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
3735 	       &sdr_reg_file->trk_read_dqs_width);
3736 
3737 	/* trefi [7:0] */
3738 	writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
3739 	       &sdr_reg_file->trk_rfsh);
3740 }
3741 
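/**
 * sdram_calibration_full() - Execute a full SDRAM calibration
 *
 * Execute a full memory calibration and report the result; returns
 * nonzero if the calibration passed, 0 if it failed.
 */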
3742 int sdram_calibration_full(void)
3743 {
3744 	struct param_type my_param;
3745 	struct gbl_type my_gbl;
3746 	uint32_t pass;
3747 
3748 	memset(&my_param, 0, sizeof(my_param));
3749 	memset(&my_gbl, 0, sizeof(my_gbl));
3750 
3751 	param = &my_param;
3752 	gbl = &my_gbl;
3753 
3754 	/* Enable the calibration report by default. */
3755 	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3756 	/*
3757 	 * Sweeping all groups regardless of fail state is off by default;
3758 	 * the guaranteed read test stays enabled unless it was disabled
3759 	 * at generation time.
3760 	 */
3760 #if DISABLE_GUARANTEED_READ
3761 	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3762 #endif
3763 	/* Initialize the register file */
3764 	initialize_reg_file();
3765 
3766 	/* Initialize any PHY CSR */
3767 	initialize_hps_phy();
3768 
3769 	scc_mgr_initialize();
3770 
3771 	initialize_tracking();
3772 
3773 	printf("%s: Preparing to start memory calibration\n", __FILE__);
3774 
3775 	debug("%s:%d\n", __func__, __LINE__);
3776 	debug_cond(DLEVEL == 1,
3777 		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3778 		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3779 		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3780 		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3781 		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3782 	debug_cond(DLEVEL == 1,
3783 		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3784 		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3785 		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3786 		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
3787 	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u ",
3788 		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3789 	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3790 		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3791 		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3792 	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3793 		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3794 		   IO_IO_OUT2_DELAY_MAX);
3795 	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3796 		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3797 
3798 	hc_initialize_rom_data();
3799 
3800 	/* Update stage/group info for simulators. */
3801 	reg_file_set_stage(CAL_STAGE_NIL);
3802 	reg_file_set_group(0);
3803 
3804 	/*
3805 	 * Load global needed for those actions that require
3806 	 * some dynamic calibration support.
3807 	 */
3808 	dyn_calib_steps = STATIC_CALIB_STEPS;
3809 	/*
3810 	 * Load global to allow dynamic selection of delay loop settings
3811 	 * based on calibration mode.
3812 	 */
3813 	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3814 		skip_delay_mask = 0xff;
3815 	else
3816 		skip_delay_mask = 0x0;
3817 
3818 	pass = run_mem_calibrate();
3819 	debug_mem_calibrate(pass);
3820 	return pass;
3821 }
3822