/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier:    BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode,
 * where we skip everything except the bare minimum, we need a few of the
 * steps to be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS
 * for the check, which is based on the RTL-supplied value, or we dynamically
 * compute the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* Calibration steps requested by the RTL. */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option instead of
 * static, we use boolean logic to select between non-skip and skip values.
 *
 * The mask is set to include all bits when not skipping, but is zero when
 * skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
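
/*
 * Worked example (illustrative): with skip_delay_mask == 0xffff,
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x7f) evaluates to 0x7f and a delay loop
 * runs in full; with skip_delay_mask == 0, it evaluates to 0, collapsing
 * the delay loop when delay loops are being skipped.
 */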
78 
struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/*
	 * Calibration has control over the path to memory.
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* Memory clock is not stable as we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* Calibration status is all cleared at the start. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
	ratio = RW_MGR_MEM_DATA_WIDTH /
		RW_MGR_MEM_DATA_MASK_WIDTH;
	param->dm_correct_mask = (1 << ratio) - 1;
}
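
/*
 * Worked example of the mask setup above (8 DQ per read DQS and 2 virtual
 * groups per read DQS are assumed, illustrative widths): ratio = 8 / 2 = 4,
 * so read_correct_mask_vg = (1 << 4) - 1 = 0xf, one mask bit per DQ pin in
 * a virtual group, and read_correct_mask = (1 << 8) - 1 = 0xff for the
 * whole group.
 */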
157 
/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMMs)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank.
				 * Write: Turn on ODT on all ranks.
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks.
				 * Write: Turn on ODT on the active rank.
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * Read:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
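
/*
 * Worked example of the packing above: accessing rank 0 of a 4-rank part
 * gives odt_mask_0 = 0x4 and odt_mask_1 = 0x5 (per the tables), so
 * cs_and_odt_mask = 0xFE | (0x04 << 8) | (0x05 << 16) = 0x0504FE: the CS
 * byte has only rank 0's bit cleared, the read-ODT byte enables rank 2,
 * and the write-ODT byte enables ranks 0 and 2.
 */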
261 
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
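
/*
 * The register address is formed as base | offset | (group << 2); the
 * group index is shifted by 2 so each group gets its own 32-bit register
 * slot. For example, group 3 of a given offset lands 12 bytes above
 * group 0.
 */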
274 
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *                             MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}
296 
static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}
345 
/* Load up DQS config settings. */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* Load up DQS IO config settings. */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* Load up DQ config settings. */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* Load up DM config settings. */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}
369 
/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}
395 
static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our SCC manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our SCC manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}
440 
/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in the SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
464 
/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager:
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
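	/* A worked check: the fields above pack to value == 0x27 (binary 100111). */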
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}
493 
/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * Zero all DQS config settings, across all groups and all
	 * shadow registers.
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
531 
/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}
576 
/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}
621 
/*
 * Apply and load a particular input delay for the DQ pins in a group;
 * group_bgn is the index of the first DQ pin in the write group.
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}
635 
/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* Apply and load a particular output delay for the DM pins in a group. */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* Apply and load delay on both DQS and OCT out1. */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}
674 
/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
723 
/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}
743 
/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in the DDR3 inst_rom; it could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}
760 
/*
 * Should always use constants as argument to ensure all computations are
 * performed at compile time.
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get AFI clocks. */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}

	/*
	 * ROM instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * If we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * set TARGET_B to IDLE_LOOP1, and skip IDLE_LOOP2 entirely.
	 *
	 * A little confusing, but it helps save precious space in the
	 * inst_rom and sequencer rom, keeps the delays more accurate, and
	 * reduces overhead.
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
			&sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
			&sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
			&sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Hack to get around compiler not being smart enough. */
		if (afi_clocks <= 0x10000) {
			/* Only need to run once. */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
					SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
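
/*
 * Worked example of the decomposition above (AFI_RATE_RATIO == 2 is an
 * assumed, typical value): delay_for_n_mem_clocks(512) yields afi_clocks
 * = 256, so inner = 0xff and the single IDLE_LOOP1 path is taken; a
 * request yielding afi_clocks = 300 gives inner = 0xff and
 * outer = (300 - 1) >> 8 = 1, taking the nested IDLE_LOOP2 path.
 */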
855 
/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}
886 
/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}
954 
/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / CKE part of initialization is broadcast to all ranks. */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load registers for a loop:
	 * Counters are located @ 0x800,
	 * Jump addresses are located @ 0xC00.
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C.
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits.
	 */

	/* Start with memory RESET activated. */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles.
	 * If a and b are the number of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b,
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A.
	 */
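	/*
	 * A worked check: ((2 + 0) * 256 + 2) * 106 = 54484 cycles, which
	 * covers the ~54000 cycles required.
	 */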
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * Transition the RESET to high.
	 * Wait for 500us.
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles.
	 * If a and b are the number of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b,
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF.
	 */
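	/*
	 * A worked check: ((2 + 2) * 131 + 2) * 256 = 134656 cycles, which
	 * covers the ~134000 cycles required.
	 */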
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}
1024 
/*
 * At the end of calibration we have to program the user settings in and
 * hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff, so it's okay.
	 */
}
1038 
/**
 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @all_ranks:	Test all ranks
 *
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	const u32 addr_offset =
			 (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	const u32 correct_mask_vg = param->read_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
	int vg, r;
	int ret = 0;

	bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a burst of read commands. */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
			writel(RW_MGR_GUARANTEED_READ,
			       addr + addr_offset + (vg << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
		}

		bit_chk &= tmp_bit_chk;
	}

	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (bit_chk != param->read_correct_mask)
		ret = -EIO;

	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
		   __func__, __LINE__, group, bit_chk,
		   param->read_correct_mask, ret);

	return ret;
}
1118 
/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
			RW_MGR_MEM_NUMBER_OF_RANKS :
			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst. */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
			&sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
1171 
/**
 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
 * @rank_bgn:		Rank number
 * @group:		Read/Write group
 * @num_tries:		Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @bit_chk:		Resulting bit mask after the test
 * @all_groups:		Test all R/W groups
 * @all_ranks:		Test all ranks
 *
 * Try a read and see if it returns correct data back. The test has dummy
 * reads inserted into the mix, used to align DQS enable, and more thorough
 * checks than the regular read test.
 */
static int
rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
			       const u32 num_tries, const u32 all_correct,
			       u32 *bit_chk,
			       const u32 all_groups, const u32 all_ranks)
{
	const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 quick_read_mode =
		((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
		 ENABLE_SUPER_QUICK_CALIBRATION);
	u32 correct_mask_vg = param->read_correct_mask_vg;
	u32 tmp_bit_chk;
	u32 base_rw_mgr;
	u32 addr;

	int r, vg, ret;

	*bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
			&sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
			&sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
			&sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
		     vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			if (all_groups) {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			} else {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			}

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
			       vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
					RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
			tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
		}

		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (all_correct) {
		ret = (*bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask, ret);
	} else {
		ret = (*bit_chk != 0x00);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   0, ret);
	}

	return ret;
}
1293 
/**
 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
 * @grp:		Read/Write group
 * @num_tries:		Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @all_groups:		Test all R/W groups
 *
 * Perform a READ test across all memory ranks.
 */
static int
rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
					 const u32 all_correct,
					 const u32 all_groups)
{
	u32 bit_chk;

	return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
					      &bit_chk, all_groups, 1);
}
1312 
/**
 * rw_mgr_incr_vfifo() - Increase VFIFO value
 * @grp:	Read/Write group
 *
 * Increase VFIFO value.
 */
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

/**
 * rw_mgr_decr_vfifo() - Decrease VFIFO value
 * @grp:	Read/Write group
 *
 * Decrease VFIFO value.
 */
static void rw_mgr_decr_vfifo(const u32 grp)
{
	u32 i;
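	/*
	 * The hard PHY only exposes an increment command here, so a
	 * decrement is emulated by incrementing VFIFO_SIZE - 1 times,
	 * which wraps the pointer around to the previous position.
	 */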
1333 
	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp);
}
1337 
/**
 * find_vfifo_failing_read() - Push VFIFO to get a failing read
 * @grp:	Read/Write group
 *
 * Push VFIFO until a failing read happens.
 */
static int find_vfifo_failing_read(const u32 grp)
{
	u32 v, ret, fail_cnt = 0;

	for (v = 0; v < VFIFO_SIZE; v++) {
		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
						PASS_ONE_BIT, 0);
		if (!ret) {
			fail_cnt++;

			if (fail_cnt == 2)
				return v;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	/* No failing read found! Something must have gone wrong. */
	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}
1368 
/**
 * sdr_find_phase_delay() - Find DQS enable phase or delay
 * @working:	If 1, look for working phase/delay, if 0, look for non-working
 * @delay:	If 1, look for delay, if 0, look for phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @work_inc:	Working window increment
 * @pd:		DQS Phase/Delay Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase_delay(int working, int delay, const u32 grp,
				u32 *work, const u32 work_inc, u32 *pd)
{
	const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
	u32 ret;

	for (; *pd <= max; (*pd)++) {
		if (delay)
			scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
		else
			scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
					PASS_ONE_BIT, 0);
		if (!working)
			ret = !ret;

		if (ret)
			return 0;

		if (work)
			*work += work_inc;
	}

	return -EINVAL;
}

/**
 * sdr_find_phase() - Find DQS enable phase
 * @working:	If 1, look for working phase, if 0, look for non-working phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @i:		Iterator
 * @p:		DQS Phase Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);
	int ret;

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		ret = sdr_find_phase_delay(working, 0, grp, work,
					   IO_DELAY_PER_OPA_TAP, p);
		if (!ret)
			return 0;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}
1441 
/**
 * sdr_working_phase() - Find working DQS enable phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @d:		dtaps output value
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find working DQS enable phase setting.
 */
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
		ret = sdr_find_phase(1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);
	return -EINVAL;
}
1475 
/**
 * sdr_backup_phase() - Find DQS enable backup phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @p:		DQS Phase Iterator
 *
 * Find DQS enable backup phase setting.
 */
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
					PASS_ONE_BIT, 0);
		if (ret) {
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}
1521 
/**
 * sdr_nonworking_phase() - Find non-working DQS enable phase
 * @grp:	Read/Write group
 * @work_end:	Working window end position
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find non-working DQS enable phase setting.
 */
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}
1552 
/**
 * sdr_find_window_center() - Find center of the working DQS window.
 * @grp:	Read/Write group
 * @work_bgn:	First working settings
 * @work_end:	Last working settings
 *
 * Find center of the working DQS enable window.
 */
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 work_mid;
	int tmp_delay = 0;
	int i, p, d;

	work_mid = (work_bgn + work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	work_mid %= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);

	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
	p = tmp_delay / IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);

	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
	if (d > IO_DQS_EN_DELAY_MAX)
		d = IO_DQS_EN_DELAY_MAX;
	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

	/*
	 * Push VFIFO until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     0)) {
			debug_cond(DLEVEL == 2,
				   "%s:%d center: found: ptap=%u dtap=%u\n",
				   __func__, __LINE__, p, d);
			return 0;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
		   __func__, __LINE__);
	return -EINVAL;
}
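
/*
 * Worked example of the centering arithmetic above (the IO_* figures are
 * assumed, illustrative values; the real ones come from
 * sequencer_defines.h): with IO_DELAY_PER_OPA_TAP = 2500 ps,
 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 78 ps, IO_DQS_EN_PHASE_MAX = 7 and
 * work_mid = 9000 ps, one VFIFO cycle spans 8 * 2500 = 20000 ps, so
 * work_mid is unchanged by the modulo; then
 * p = rounddown(9000, 2500) / 2500 = 3 and
 * d = DIV_ROUND_UP(9000 - 7500, 78) = 20.
 */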
1619 
/**
 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
 * @grp:	Read/Write Group
 *
 * Find a good DQS enable to use.
 */
static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
{
	u32 d, p, i;
	u32 dtaps_per_ptap;
	u32 work_bgn, work_end;
	u32 found_passing_read, found_failing_read, initial_failing_dtap;
	int ret;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* Step 0: Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* Step 1: First push vfifo until we get a failing read. */
	find_vfifo_failing_read(grp);

	/* Step 2: Find first working phase, increment in ptaps. */
	work_bgn = 0;
	ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
	if (ret)
		return ret;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure. Otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/*
		 * Step 3a: If we have room, back off by one and
		 *          increment in dtaps.
		 */
		sdr_backup_phase(grp, &work_bgn, &p);

		/*
		 * Step 4a: go forward from working phase to non working
		 * phase, increment in ptaps.
		 */
		ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
		if (ret)
			return ret;

		/* Step 5a: Back off one from last, increment in dtaps. */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
			   __func__, __LINE__, p);
	}

	/* The dtap increment to find the failing edge is done here. */
	sdr_find_phase_delay(0, 1, grp, &work_end,
			     IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2,
		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
			   __func__, __LINE__);
		return -EINVAL;
	}

	debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/*
	 * We need to calculate the number of dtaps that equal a ptap.
	 * To do that we'll back up a ptap and re-find the edge of the
	 * window using dtaps.
	 */
	debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
		debug_cond(DLEVEL == 2, "%s:%d backed up cycle/phase: p=%u\n",
			   __func__, __LINE__, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d backed up phase only: p=%u",
			   __func__, __LINE__, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */

	/* Find a passing read. */
	debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
		   __func__, __LINE__);

	initial_failing_dtap = d;

	found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
	if (found_passing_read) {
		/* Find a failing read. */
		debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
			   __func__, __LINE__);
		d++;
		found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
							   &d);
	} else {
		debug_cond(DLEVEL == 1,
			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
			   __func__, __LINE__);
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);

	/* Step 6: Find the centre of the window. */
	ret = sdr_find_window_center(grp, work_bgn, work_end);

	return ret;
}
1782 
1783 /**
1784  * search_left_edge() - Find left edge of DQ/DQS working phase
1785  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
1786  * @rank_bgn:		Rank number
1787  * @write_group:	Write Group
1788  * @read_group:		Read Group
1789  * @test_bgn:		Rank number to begin the test
1790  * @bit_chk:		Resulting bit mask after the test
1791  * @sticky_bit_chk:	Resulting sticky bit mask after the test
1792  * @left_edge:		Left edge of the DQ/DQS phase
1793  * @right_edge:		Right edge of the DQ/DQS phase
1794  * @use_read_test:	Perform read test
1795  *
1796  * Find left edge of DQ/DQS working phase.
1797  */
1798 static void search_left_edge(const int write, const int rank_bgn,
1799 	const u32 write_group, const u32 read_group, const u32 test_bgn,
1800 	u32 *bit_chk, u32 *sticky_bit_chk,
1801 	int *left_edge, int *right_edge, const u32 use_read_test)
1802 {
1803 	const u32 correct_mask = write ? param->write_correct_mask :
1804 					 param->read_correct_mask;
1805 	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
1806 	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
1807 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
1808 				    RW_MGR_MEM_DQ_PER_READ_DQS;
1809 	u32 stop;
1810 	int i, d;
1811 
1812 	for (d = 0; d <= dqs_max; d++) {
1813 		if (write)
1814 			scc_mgr_apply_group_dq_out1_delay(d);
1815 		else
1816 			scc_mgr_apply_group_dq_in_delay(test_bgn, d);
1817 
1818 		writel(0, &sdr_scc_mgr->update);
1819 
1820 		/*
1821 		 * Stop searching when the read test doesn't pass AND when
1822 		 * we've seen a passing read on every bit.
1823 		 */
1824 		if (write) {			/* WRITE-ONLY */
1825 			stop = !rw_mgr_mem_calibrate_write_test(rank_bgn,
1826 						write_group,
1827 						0, PASS_ONE_BIT,
1828 						bit_chk, 0);
1829 		} else if (use_read_test) {	/* READ-ONLY */
1830 			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1831 				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1832 				bit_chk, 0, 0);
1833 		} else {			/* READ-ONLY */
1834 			rw_mgr_mem_calibrate_write_test(rank_bgn,
1835 							write_group,
1836 							0, PASS_ONE_BIT,
1837 							bit_chk, 0);
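			/*
			 * Example (assumed widths): with 2 read groups per
			 * write group and 8 DQ per read DQS, the second read
			 * group of a write group shifts the result down by
			 * 8 bits so its own DQ bits land at bit 0.
			 */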
1838 			*bit_chk = *bit_chk >> (per_dqs *
1839 				(read_group - (write_group *
1840 					RW_MGR_MEM_IF_READ_DQS_WIDTH /
1841 					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1842 			stop = (*bit_chk == 0);
1843 		}
1844 		*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
1845 		stop = stop && (*sticky_bit_chk == correct_mask);
1846 		debug_cond(DLEVEL == 2,
1847 			   "%s:%d center(left): dtap=%u => %u == %u && %u", __func__, __LINE__, d,
1848 			   *sticky_bit_chk, correct_mask, stop);
1849 
1850 		if (stop == 1)
1851 			break;
1852 
1853 		/* stop != 1 */
1854 		for (i = 0; i < per_dqs; i++) {
1855 			if (*bit_chk & 1) {
1856 				/*
1857 				 * Remember a passing test as
1858 				 * the left_edge.
1859 				 */
1860 				left_edge[i] = d;
1861 			} else {
1862 				/*
1863 				 * If a left edge has not been seen
1864 				 * yet, then a future passing test
1865 				 * will mark this edge as the right
1866 				 * edge.
1867 				 */
1868 				if (left_edge[i] == delay_max + 1)
1869 					right_edge[i] = -(d + 1);
1870 			}
1871 			*bit_chk = *bit_chk >> 1;
1872 		}
1873 	}
1874 
1875 	/* Reset DQ delay chains to 0 */
1876 	if (write)
1877 		scc_mgr_apply_group_dq_out1_delay(0);
1878 	else
1879 		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
1880 
1881 	*sticky_bit_chk = 0;
1882 	for (i = per_dqs - 1; i >= 0; i--) {
1883 		debug_cond(DLEVEL == 2,
1884 			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
1885 			   __func__, __LINE__, i, left_edge[i],
1886 			   i, right_edge[i]);
1887 
1888 		/*
1889 		 * Check for cases where we haven't found the left edge,
1890 		 * which makes our assignment of the right edge invalid.
1891 		 * Reset it to the illegal value.
1892 		 */
1893 		if ((left_edge[i] == delay_max + 1) &&
1894 		    (right_edge[i] != delay_max + 1)) {
1895 			right_edge[i] = delay_max + 1;
1896 			debug_cond(DLEVEL == 2,
1897 				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
1898 				   __func__, __LINE__, i, right_edge[i]);
1899 		}
1900 
1901 		/*
1902 		 * Reset sticky bit
1903 		 * READ: except for bits where we have seen both
1904 		 *       the left and right edge.
1905 		 * WRITE: except for bits where we have seen the
1906 		 *        left edge.
1907 		 */
1908 		*sticky_bit_chk <<= 1;
1909 		if (write) {
1910 			if (left_edge[i] != delay_max + 1)
1911 				*sticky_bit_chk |= 1;
1912 		} else {
1913 			if ((left_edge[i] != delay_max + 1) &&
1914 			    (right_edge[i] != delay_max + 1))
1915 				*sticky_bit_chk |= 1;
1916 		}
1917 	}
1920 }
1921 
1922 /**
1923  * search_right_edge() - Find right edge of DQ/DQS working phase
1924  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
1925  * @rank_bgn:		Rank number
1926  * @write_group:	Write Group
1927  * @read_group:		Read Group
1928  * @start_dqs:		DQS start phase
1929  * @start_dqs_en:	DQS enable start phase
1930  * @bit_chk:		Resulting bit mask after the test
1931  * @sticky_bit_chk:	Resulting sticky bit mask after the test
1932  * @left_edge:		Left edge of the DQ/DQS phase
1933  * @right_edge:		Right edge of the DQ/DQS phase
1934  * @use_read_test:	Perform read test
1935  *
1936  * Find right edge of DQ/DQS working phase.
1937  */
1938 static int search_right_edge(const int write, const int rank_bgn,
1939 	const u32 write_group, const u32 read_group,
1940 	const int start_dqs, const int start_dqs_en,
1941 	u32 *bit_chk, u32 *sticky_bit_chk,
1942 	int *left_edge, int *right_edge, const u32 use_read_test)
1943 {
1944 	const u32 correct_mask = write ? param->write_correct_mask :
1945 					 param->read_correct_mask;
1946 	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
1947 	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
1948 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
1949 				    RW_MGR_MEM_DQ_PER_READ_DQS;
1950 	u32 stop;
1951 	int i, d;
1952 
1953 	for (d = 0; d <= dqs_max - start_dqs; d++) {
1954 		if (write) {	/* WRITE-ONLY */
1955 			scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
1956 								d + start_dqs);
1957 		} else {	/* READ-ONLY */
1958 			scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
1959 			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
1960 				uint32_t delay = d + start_dqs_en;
1961 				if (delay > IO_DQS_EN_DELAY_MAX)
1962 					delay = IO_DQS_EN_DELAY_MAX;
1963 				scc_mgr_set_dqs_en_delay(read_group, delay);
1964 			}
1965 			scc_mgr_load_dqs(read_group);
1966 		}
1967 
1968 		writel(0, &sdr_scc_mgr->update);
1969 
1970 		/*
1971 		 * Stop searching when the read test doesn't pass AND when
1972 		 * we've seen a passing read on every bit.
1973 		 */
1974 		if (write) {	/* WRITE-ONLY */
1975 			stop = !rw_mgr_mem_calibrate_write_test(rank_bgn,
1976 							write_group,
1977 							0, PASS_ONE_BIT,
1978 							bit_chk, 0);
1979 
1980 		} else if (use_read_test) {	/* READ-ONLY */
1981 			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1982 				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1983 				bit_chk, 0, 0);
1984 		} else {			/* READ-ONLY */
1985 			rw_mgr_mem_calibrate_write_test(rank_bgn,
1986 							write_group,
1987 							0, PASS_ONE_BIT,
1988 							bit_chk, 0);
1989 			*bit_chk = *bit_chk >> (per_dqs *
1990 				(read_group - (write_group *
1991 					RW_MGR_MEM_IF_READ_DQS_WIDTH /
1992 					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1993 			stop = (*bit_chk == 0);
1994 		}
1995 		*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
1996 		stop = stop && (*sticky_bit_chk == correct_mask);
1997 
1998 		debug_cond(DLEVEL == 2,
1999 			   "%s:%d center(right): dtap=%u => %u == %u && %u", __func__, __LINE__, d,
2000 			   *sticky_bit_chk, correct_mask, stop);
2001 
2002 		if (stop == 1) {
2003 			if (write && (d == 0)) {	/* WRITE-ONLY */
2004 				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2005 					/*
2006 					 * d = 0 failed, but it passed when
2007 					 * testing the left edge, so it must be
2008 					 * marginal, set it to -1
2009 					 */
2010 					if (right_edge[i] == delay_max + 1 &&
2011 					    left_edge[i] != delay_max + 1)
2012 						right_edge[i] = -1;
2013 				}
2014 			}
2015 			break;
2016 		}
2017 
2018 		/* stop != 1 */
2019 		for (i = 0; i < per_dqs; i++) {
2020 			if (*bit_chk & 1) {
2021 				/*
2022 				 * Remember a passing test as
2023 				 * the right_edge.
2024 				 */
2025 				right_edge[i] = d;
2026 			} else {
2027 				if (d != 0) {
2028 					/*
2029 					 * If a right edge has not
2030 					 * been seen yet, then a future
2031 					 * passing test will mark this
2032 					 * edge as the left edge.
2033 					 */
2034 					if (right_edge[i] == delay_max + 1)
2035 						left_edge[i] = -(d + 1);
2036 				} else {
2037 					/*
2038 					 * d = 0 failed, but it passed
2039 					 * when testing the left edge,
2040 					 * so it must be marginal, set
2041 					 * it to -1
2042 					 */
2043 					if (right_edge[i] == delay_max + 1 &&
2044 					    left_edge[i] != delay_max + 1)
2045 						right_edge[i] = -1;
2046 					/*
2047 					 * If a right edge has not been
2048 					 * seen yet, then a future
2049 					 * passing test will mark this
2050 					 * edge as the left edge.
2051 					 */
2052 					else if (right_edge[i] == delay_max + 1)
2053 						left_edge[i] = -(d + 1);
2054 				}
2055 			}
2056 
2057 			debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ",
2058 				   __func__, __LINE__, d);
2059 			debug_cond(DLEVEL == 2,
2060 				   "bit_chk_test=%i left_edge[%u]: %d ",
2061 				   *bit_chk & 1, i, left_edge[i]);
2062 			debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2063 				   right_edge[i]);
2064 			*bit_chk = *bit_chk >> 1;
2065 		}
2066 	}
2067 
2068 	/* Check that all bits have a window */
2069 	for (i = 0; i < per_dqs; i++) {
2070 		debug_cond(DLEVEL == 2,
2071 			   "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2072 			   __func__, __LINE__, i, left_edge[i],
2073 			   i, right_edge[i]);
2074 		if ((left_edge[i] == dqs_max + 1) ||
2075 		    (right_edge[i] == dqs_max + 1))
2076 			return i + 1;	/* FIXME: If we fail, retval > 0 */
2077 	}
2078 
2079 	return 0;
2080 }
2081 
2082 /* per-bit deskew DQ and center */
2083 static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
2084 	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
2085 	uint32_t use_read_test, uint32_t update_fom)
2086 {
2087 	uint32_t i, p, min_index;
2088 	/*
2089 	 * Store these as signed since there are comparisons with
2090 	 * signed numbers.
2091 	 */
2092 	uint32_t bit_chk;
2093 	uint32_t sticky_bit_chk;
2094 	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
2095 	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
2096 	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
2097 	int32_t mid;
2098 	int32_t orig_mid_min, mid_min;
2099 	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
2100 		final_dqs_en;
2101 	int32_t dq_margin, dqs_margin;
2102 	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
2103 	uint32_t addr;
2104 	int ret;
2105 
2106 	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
2107 
2108 	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
2109 	start_dqs = readl(addr + (read_group << 2));
2110 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2111 		start_dqs_en = readl(addr + ((read_group << 2)
2112 				     - IO_DQS_EN_DELAY_OFFSET));
2113 
2114 	/* set the left and right edge of each bit to an illegal value */
2115 	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
2116 	sticky_bit_chk = 0;
2117 	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2118 		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
2119 		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
2120 	}
2121 
2122 	/* Search for the left edge of the window for each bit */
2123 	search_left_edge(0, rank_bgn, write_group, read_group, test_bgn,
2124 			 &bit_chk, &sticky_bit_chk,
2125 			 left_edge, right_edge, use_read_test);
2126 
2127 	/* Search for the right edge of the window for each bit */
2128 	ret = search_right_edge(0, rank_bgn, write_group, read_group,
2129 				start_dqs, start_dqs_en,
2130 				&bit_chk, &sticky_bit_chk,
2131 				left_edge, right_edge, use_read_test);
2132 	if (ret) {
2133 		/*
2134 		 * Restore delay chain settings before letting the loop
2135 		 * in rw_mgr_mem_calibrate_vfifo to retry different
2136 		 * dqs/ck relationships.
2137 		 */
2138 		scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
2139 		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2140 			scc_mgr_set_dqs_en_delay(read_group, start_dqs_en);
2141 
2142 		scc_mgr_load_dqs(read_group);
2143 		writel(0, &sdr_scc_mgr->update);
2144 
2145 		debug_cond(DLEVEL == 1,
2146 			   "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
2147 			   __func__, __LINE__, ret - 1, left_edge[ret - 1], right_edge[ret - 1]);
2148 		if (use_read_test) {
2149 			set_failing_group_stage(read_group *
2150 				RW_MGR_MEM_DQ_PER_READ_DQS + ret - 1,
2151 				CAL_STAGE_VFIFO,
2152 				CAL_SUBSTAGE_VFIFO_CENTER);
2153 		} else {
2154 			set_failing_group_stage(read_group *
2155 				RW_MGR_MEM_DQ_PER_READ_DQS + ret - 1,
2156 				CAL_STAGE_VFIFO_AFTER_WRITES,
2157 				CAL_SUBSTAGE_VFIFO_CENTER);
2158 		}
2159 		return 0;
2160 	}
2161 
2162 	/* Find middle of window for each DQ bit */
2163 	mid_min = left_edge[0] - right_edge[0];
2164 	min_index = 0;
2165 	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2166 		mid = left_edge[i] - right_edge[i];
2167 		if (mid < mid_min) {
2168 			mid_min = mid;
2169 			min_index = i;
2170 		}
2171 	}
2172 
2173 	/*
2174 	 * -mid_min/2 represents the amount that we need to move DQS.
2175 	 * If mid_min is odd and positive we'll need to add one to
2176 	 * make sure the rounding in further calculations is correct
2177 	 * (always bias to the right), so just add 1 for all positive values.
2178 	 */
2179 	if (mid_min > 0)
2180 		mid_min++;
2181 
2182 	mid_min = mid_min / 2;
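	/*
	 * Worked example (hypothetical values): mid_min = 5 is bumped to 6
	 * above and halves to 3, while mid_min = -5 stays put and truncates
	 * toward zero to -2, so positive values are biased to the right.
	 */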
2183 
2184 	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2185 		   __func__, __LINE__, mid_min, min_index);
2186 
2187 	/* Determine the amount we can change DQS (which is -mid_min) */
2188 	orig_mid_min = mid_min;
2189 	new_dqs = start_dqs - mid_min;
2190 	if (new_dqs > IO_DQS_IN_DELAY_MAX)
2191 		new_dqs = IO_DQS_IN_DELAY_MAX;
2192 	else if (new_dqs < 0)
2193 		new_dqs = 0;
2194 
2195 	mid_min = start_dqs - new_dqs;
2196 	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2197 		   mid_min, new_dqs);
2198 
2199 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2200 		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2201 			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2202 		else if (start_dqs_en - mid_min < 0)
2203 			mid_min += start_dqs_en - mid_min;
2204 	}
2205 	new_dqs = start_dqs - mid_min;
2206 
2207 	debug_cond(DLEVEL == 1,
2208 		   "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2209 		   start_dqs, IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2210 		   new_dqs, mid_min);
2211 
2212 	/* Initialize data for export structures */
2213 	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
2214 	dq_margin  = IO_IO_IN_DELAY_MAX + 1;
2215 
2216 	/* add delay to bring centre of all DQ windows to the same "level" */
2217 	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
2218 		/* Use values before divide by 2 to reduce round off error */
2219 		shift_dq = (left_edge[i] - right_edge[i] -
2220 			(left_edge[min_index] - right_edge[min_index]))/2  +
2221 			(orig_mid_min - mid_min);
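		/*
		 * Worked example (hypothetical edges, orig_mid_min ==
		 * mid_min): left_edge[i] = 10, right_edge[i] = 2 and a
		 * narrowest window of 6/2 give shift_dq = (8 - 4) / 2 = 2,
		 * before the clamping against the delay chain limits below.
		 */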
2222 
2223 		debug_cond(DLEVEL == 2, "vfifo_center: before: shift_dq[%u]=%d\n",
2224 			   i, shift_dq);
2225 
2226 		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
2227 		temp_dq_in_delay1 = readl(addr + (p << 2));
2228 		temp_dq_in_delay2 = readl(addr + (i << 2));
2229 
2230 		if (shift_dq + (int32_t)temp_dq_in_delay1 >
2231 			(int32_t)IO_IO_IN_DELAY_MAX) {
2232 			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
2233 		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
2234 			shift_dq = -(int32_t)temp_dq_in_delay1;
2235 		}
2236 		debug_cond(DLEVEL == 2, "vfifo_center: after: shift_dq[%u]=%d\n",
2237 			   i, shift_dq);
2238 		final_dq[i] = temp_dq_in_delay1 + shift_dq;
2239 		scc_mgr_set_dq_in_delay(p, final_dq[i]);
2240 		scc_mgr_load_dq(p);
2241 
2242 		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
2243 			   left_edge[i] - shift_dq + (-mid_min),
2244 			   right_edge[i] + shift_dq - (-mid_min));
2245 		/* To determine values for export structures */
2246 		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2247 			dq_margin = left_edge[i] - shift_dq + (-mid_min);
2248 
2249 		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2250 			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2251 	}
2252 
2253 	final_dqs = new_dqs;
2254 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2255 		final_dqs_en = start_dqs_en - mid_min;
2256 
2257 	/* Move DQS-en */
2258 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2259 		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
2260 		scc_mgr_load_dqs(read_group);
2261 	}
2262 
2263 	/* Move DQS */
2264 	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
2265 	scc_mgr_load_dqs(read_group);
2266 	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
2267 		   __func__, __LINE__,
2268 		   dq_margin, dqs_margin);
2269 
2270 	/*
2271 	 * Do not remove this line as it makes sure all of our decisions
2272 	 * have been applied. Apply the update bit.
2273 	 */
2274 	writel(0, &sdr_scc_mgr->update);
2275 
2276 	return (dq_margin >= 0) && (dqs_margin >= 0);
2277 }
2278 
2279 /**
2280  * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
2281  * @rw_group:	Read/Write Group
2282  * @phase:	DQ/DQS phase
2283  *
2284  * Because initially no communication can be reliably performed with the memory
2285  * device, the sequencer uses a guaranteed write mechanism to write data into
2286  * the memory device.
2287  */
2288 static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
2289 						 const u32 phase)
2290 {
2291 	int ret;
2292 
2293 	/* Set a particular DQ/DQS phase. */
2294 	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
2295 
2296 	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
2297 		   __func__, __LINE__, rw_group, phase);
2298 
2299 	/*
2300 	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2301 	 * Load up the patterns used by read calibration using the
2302 	 * current DQDQS phase.
2303 	 */
2304 	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2305 
2306 	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
2307 		return 0;
2308 
2309 	/*
2310 	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2311 	 * Back-to-Back reads of the patterns used for calibration.
2312 	 */
2313 	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
2314 	if (ret)
2315 		debug_cond(DLEVEL == 1,
2316 			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2317 			   __func__, __LINE__, rw_group, phase);
2318 	return ret;
2319 }
2320 
2321 /**
2322  * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2323  * @rw_group:	Read/Write Group
2324  * @test_bgn:	Rank at which the test begins
2325  *
2326  * DQS enable calibration ensures reliable capture of the DQ signal without
2327  * glitches on the DQS line.
2328  */
2329 static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
2330 						       const u32 test_bgn)
2331 {
2332 	/*
2333 	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2334 	 * DQS and DQS Enable Signal Relationships.
2335 	 */
2336 
2337 	/* We start at zero, so we have one less DQ to divide among. */
2338 	const u32 delay_step = IO_IO_IN_DELAY_MAX /
2339 			       (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
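	/*
	 * Example (assumed typical values): IO_IO_IN_DELAY_MAX == 31 and
	 * RW_MGR_MEM_DQ_PER_READ_DQS == 8 give delay_step = 31 / 7 = 4,
	 * staggering the DQ input delays as 0, 4, 8, ..., 28.
	 */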
2340 	int ret;
2341 	u32 i, p, d, r;
2342 
2343 	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2344 
2345 	/* Try different dq_in_delays since the DQ path is shorter than DQS. */
2346 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2347 	     r += NUM_RANKS_PER_SHADOW_REG) {
2348 		for (i = 0, p = test_bgn, d = 0;
2349 		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
2350 		     i++, p++, d += delay_step) {
2351 			debug_cond(DLEVEL == 1,
2352 				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2353 				   __func__, __LINE__, rw_group, r, i, p, d);
2354 
2355 			scc_mgr_set_dq_in_delay(p, d);
2356 			scc_mgr_load_dq(p);
2357 		}
2358 
2359 		writel(0, &sdr_scc_mgr->update);
2360 	}
2361 
2362 	/*
2363 	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2364 	 * dq_in_delay values
2365 	 */
2366 	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
2367 
2368 	debug_cond(DLEVEL == 1,
2369 		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
2370 		   __func__, __LINE__, rw_group, !ret);
2371 
2372 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2373 	     r += NUM_RANKS_PER_SHADOW_REG) {
2374 		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2375 		writel(0, &sdr_scc_mgr->update);
2376 	}
2377 
2378 	return ret;
2379 }
2380 
2381 /**
2382  * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2383  * @rw_group:		Read/Write Group
2384  * @test_bgn:		Rank at which the test begins
2385  * @use_read_test:	Perform a read test
2386  * @update_fom:		Update FOM
2387  *
2388  * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
2389  * within a group.
2390  */
2391 static int
2392 rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
2393 				      const int use_read_test,
2394 				      const int update_fom)
2395 
2396 {
2397 	int ret, grp_calibrated;
2398 	u32 rank_bgn, sr;
2399 
2400 	/*
2401 	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2402 	 * Read per-bit deskew can be done on a per shadow register basis.
2403 	 */
2404 	grp_calibrated = 1;
2405 	for (rank_bgn = 0, sr = 0;
2406 	     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2407 	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
2408 		/* Check if this set of ranks should be skipped entirely. */
2409 		if (param->skip_shadow_regs[sr])
2410 			continue;
2411 
2412 		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
2413 							rw_group, test_bgn,
2414 							use_read_test,
2415 							update_fom);
2416 		if (ret)
2417 			continue;
2418 
2419 		grp_calibrated = 0;
2420 	}
2421 
2422 	if (!grp_calibrated)
2423 		return -EIO;
2424 
2425 	return 0;
2426 }
2427 
2428 /**
2429  * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2430  * @rw_group:		Read/Write Group
2431  * @test_bgn:		Rank at which the test begins
2432  *
2433  * Stage 1: Calibrate the read valid prediction FIFO.
2434  *
2435  * This function implements UniPHY calibration Stage 1, as explained in
2436  * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2437  *
2438  * - read valid prediction will consist of finding:
2439  *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2440  *   - DQS input phase and DQS input delay (DQ/DQS Centering)
2441  * - we also do a per-bit deskew on the DQ lines.
2442  */
2443 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
2444 {
2445 	uint32_t p, d;
2446 	uint32_t dtaps_per_ptap;
2447 	uint32_t failed_substage;
2448 
2449 	int ret;
2450 
2451 	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2452 
2453 	/* Update info for sims */
2454 	reg_file_set_group(rw_group);
2455 	reg_file_set_stage(CAL_STAGE_VFIFO);
2456 	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2457 
2458 	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2459 
2460 	/* USER Determine number of delay taps for each phase tap. */
2461 	dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
2462 				      IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
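	/*
	 * Example (hypothetical delays): IO_DELAY_PER_OPA_TAP == 416 ps and
	 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP == 25 ps give
	 * DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16 dtaps per ptap.
	 */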
2463 
2464 	for (d = 0; d <= dtaps_per_ptap; d += 2) {
2465 		/*
2466 		 * In RLDRAMX we may be messing with the delay of pins in
2467 		 * the same write rw_group but outside of the current read
2468 		 * rw_group, but that's OK because we haven't calibrated
2469 		 * the output side yet.
2470 		 */
2471 		if (d > 0) {
2472 			scc_mgr_apply_group_all_out_delay_add_all_ranks(
2473 								rw_group, d);
2474 		}
2475 
2476 		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
2477 			/* 1) Guaranteed Write */
2478 			ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
2479 			if (ret)
2480 				break;
2481 
2482 			/* 2) DQS Enable Calibration */
2483 			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
2484 									  test_bgn);
2485 			if (ret) {
2486 				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2487 				continue;
2488 			}
2489 
2490 			/* 3) Centering DQ/DQS */
2491 			/*
2492 			 * If doing read-after-write calibration, do not
2493 			 * update FOM now; do it later.
2494 			 */
2495 			ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
2496 								test_bgn, 1, 0);
2497 			if (ret) {
2498 				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
2499 				continue;
2500 			}
2501 
2502 			/* All done. */
2503 			goto cal_done_ok;
2504 		}
2505 	}
2506 
2507 	/* Calibration Stage 1 failed. */
2508 	set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
2509 	return 0;
2510 
2511 	/* Calibration Stage 1 completed OK. */
2512 cal_done_ok:
2513 	/*
2514 	 * Reset the delay chains back to zero if they have moved > 1
2515 	 * (check for > 1 because the loop increases d even when we pass
2516 	 * on the first attempt).
2517 	 */
2518 	if (d > 2)
2519 		scc_mgr_zero_group(rw_group, 1);
2520 
2521 	return 1;
2522 }
2523 
2524 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */
2525 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2526 					       uint32_t test_bgn)
2527 {
2528 	uint32_t rank_bgn, sr;
2529 	uint32_t grp_calibrated;
2530 	uint32_t write_group;
2531 
2532 	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
2533 
2534 	/* update info for sims */
2535 
2536 	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2537 	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2538 
2539 	write_group = read_group;
2540 
2541 	/* update info for sims */
2542 	reg_file_set_group(read_group);
2543 
2544 	grp_calibrated = 1;
2545 	/* Read per-bit deskew can be done on a per shadow register basis */
2546 	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2547 	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2548 		/* Determine if this set of ranks should be skipped entirely */
2549 		if (param->skip_shadow_regs[sr])
2550 			continue;
2551 
2552 		/* This is the last calibration round, update FOM here */
2553 		if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2554 						       write_group,
2555 						       read_group,
2556 						       test_bgn, 0, 1))
2557 			grp_calibrated = 0;
2558 	}
2559 
2562 	if (grp_calibrated == 0) {
2563 		set_failing_group_stage(write_group,
2564 					CAL_STAGE_VFIFO_AFTER_WRITES,
2565 					CAL_SUBSTAGE_VFIFO_CENTER);
2566 		return 0;
2567 	}
2568 
2569 	return 1;
2570 }
2571 
2572 /* Calibrate LFIFO to find smallest read latency */
2573 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2574 {
2575 	uint32_t found_one;
2576 
2577 	debug("%s:%d\n", __func__, __LINE__);
2578 
2579 	/* update info for sims */
2580 	reg_file_set_stage(CAL_STAGE_LFIFO);
2581 	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2582 
2583 	/* Load up the patterns used by read calibration for all ranks */
2584 	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2585 	found_one = 0;
2586 
2587 	do {
2588 		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2589 		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2590 			   __func__, __LINE__, gbl->curr_read_lat);
2591 
2592 		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
2593 							      NUM_READ_TESTS,
2594 							      PASS_ALL_BITS,
2595 							      1)) {
2596 			break;
2597 		}
2598 
2599 		found_one = 1;
2600 		/* reduce read latency and see if things are working */
2601 		/* correctly */
2602 		gbl->curr_read_lat--;
2603 	} while (gbl->curr_read_lat > 0);
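	/*
	 * Hypothetical run: if reads pass down to read_lat = 5 and fail at
	 * read_lat = 4, the loop exits with curr_read_lat == 4 and the fudge
	 * factor below restores a safe value of 6.
	 */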
2604 
2605 	/* reset the fifos to get pointers to known state */
2606 
2607 	writel(0, &phy_mgr_cmd->fifo_reset);
2608 
2609 	if (found_one) {
2610 		/* add a fudge factor to the read latency that was determined */
2611 		gbl->curr_read_lat += 2;
2612 		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2613 		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using read_lat=%u\n",
2614 			   __func__, __LINE__,
2615 			   gbl->curr_read_lat);
2616 		return 1;
2617 	} else {
2618 		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2619 					CAL_SUBSTAGE_READ_LATENCY);
2620 
2621 		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial read_lat=%u\n",
2622 			   __func__, __LINE__,
2623 			   gbl->curr_read_lat);
2624 		return 0;
2625 	}
2626 }
2627 
2628 /*
2629  * Issue a write test command.
2630  * Two variants are provided: one that just tests a write pattern and
2631  * another that tests datamask functionality.
2632  */
2633 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
2634 						  uint32_t test_dm)
2635 {
2636 	uint32_t mcc_instruction;
2637 	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
2638 		ENABLE_SUPER_QUICK_CALIBRATION);
2639 	uint32_t rw_wl_nop_cycles;
2640 	uint32_t addr;
2641 
2642 	/*
2643 	 * Set counter and jump addresses for the right
2644 	 * number of NOP cycles.
2645 	 * The number of supported NOP cycles can range from -1 to infinity
2646 	 * Three different cases are handled:
2647 	 *
2648 	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
2649 	 *    mechanism will be used to insert the right number of NOPs
2650 	 *
2651 	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
2652 	 *    issuing the write command will jump straight to the
2653 	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
2654 	 *    data (for RLD), skipping
2655 	 *    the NOP micro-instruction altogether
2656 	 *
2657 	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
2658 	 *    turned on in the same micro-instruction that issues the write
2659 	 *    command. Then we need
2660 	 *    to directly jump to the micro-instruction that sends out the data
2661 	 *
2662 	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
2663 	 *       (2 and 3). One jump-counter (0) is used to perform multiple
2664 	 *       write-read operations.
2665 	 *       One counter is left to issue this command in "multiple-group" mode.
2666 	 */
2667 
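	/*
	 * Example (hypothetical value): rw_wl_nop_cycles == 2 takes the
	 * final else branch below (case 1 above); CNTR 3 is loaded with
	 * 2 - 1 = 1 so the NOP micro-instruction loops for the two
	 * required cycles.
	 */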
2668 	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
2669 
2670 	if (rw_wl_nop_cycles == -1) {
2671 		/*
2672 		 * CNTR 2 - We want to execute the special write operation that
2673 		 * turns on DQS right away and then skip directly to the
2674 		 * instruction that sends out the data. We set the counter to a
2675 		 * large number so that the jump is always taken.
2676 		 */
2677 		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2678 
2679 		/* CNTR 3 - Not used */
2680 		if (test_dm) {
2681 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
2682 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
2683 			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2684 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2685 			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2686 		} else {
2687 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
2688 			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
2689 				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
2690 			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2691 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
2692 		}
2693 	} else if (rw_wl_nop_cycles == 0) {
2694 		/*
2695 		 * CNTR 2 - We want to skip the NOP operation and go straight
2696 		 * to the DQS enable instruction. We set the counter to a large
2697 		 * number so that the jump is always taken.
2698 		 */
2699 		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2700 
2701 		/* CNTR 3 - Not used */
2702 		if (test_dm) {
2703 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2704 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
2705 			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2706 		} else {
2707 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2708 			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
2709 				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
2710 		}
2711 	} else {
2712 		/*
2713 		 * CNTR 2 - In this case we want to execute the next instruction
2714 		 * and NOT take the jump. So we set the counter to 0. The jump
2715 		 * address doesn't count.
2716 		 */
2717 		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
2718 		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2719 
2720 		/*
2721 		 * CNTR 3 - Set the nop counter to the number of cycles we
2722 		 * need to loop for, minus 1.
2723 		 */
2724 		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
2725 		if (test_dm) {
2726 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2727 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2728 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
2729 		} else {
2730 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2731 			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2732 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
2733 		}
2734 	}
2735 
2736 	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
2737 		  RW_MGR_RESET_READ_DATAPATH_OFFSET);
2738 
2739 	if (quick_write_mode)
2740 		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
2741 	else
2742 		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
2743 
2744 	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
2745 
2746 	/*
2747 	 * CNTR 1 - This is used to ensure enough time elapses
2748 	 * for read data to come back.
2749 	 */
2750 	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
2751 
2752 	if (test_dm) {
2753 		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
2754 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
2755 	} else {
2756 		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
2757 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
2758 	}
2759 
2760 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
2761 	writel(mcc_instruction, addr + (group << 2));
2762 }
2763 
2764 /* Test writes; can check for a single-bit pass or a multiple-bit pass. */
2765 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
2766 	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
2767 	uint32_t *bit_chk, uint32_t all_ranks)
2768 {
2769 	uint32_t r;
2770 	uint32_t correct_mask_vg;
2771 	uint32_t tmp_bit_chk;
2772 	uint32_t vg;
2773 	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
2774 		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
2775 	uint32_t addr_rw_mgr;
2776 	uint32_t base_rw_mgr;
2777 
2778 	*bit_chk = param->write_correct_mask;
2779 	correct_mask_vg = param->write_correct_mask_vg;
2780 
2781 	for (r = rank_bgn; r < rank_end; r++) {
2782 		if (param->skip_ranks[r]) {
2783 			/* request to skip the rank */
2784 			continue;
2785 		}
2786 
2787 		/* set rank */
2788 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
2789 
2790 		tmp_bit_chk = 0;
2791 		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
2792 		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1; ; vg--) {
2793 			/* reset the fifos to get pointers to known state */
2794 			writel(0, &phy_mgr_cmd->fifo_reset);
2795 
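			/*
			 * Example (assumed config): with 8 DQ per write DQS
			 * and 2 virtual groups, each group contributes
			 * 8 / 2 = 4 result bits, so earlier groups' results
			 * are shifted up by 4 below.
			 */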
2796 			tmp_bit_chk = tmp_bit_chk <<
2797 				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
2798 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
2799 			rw_mgr_mem_calibrate_write_test_issue(write_group *
2800 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
2801 				use_dm);
2802 
2803 			base_rw_mgr = readl(addr_rw_mgr);
2804 			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
2805 			if (vg == 0)
2806 				break;
2807 		}
2808 		*bit_chk &= tmp_bit_chk;
2809 	}
2810 
2811 	if (all_correct) {
2812 		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2813 		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == %u => %lu",
2814 			   write_group, use_dm,
2815 			   *bit_chk, param->write_correct_mask,
2816 			   (long unsigned int)(*bit_chk ==
2817 			   param->write_correct_mask));
2818 		return *bit_chk == param->write_correct_mask;
2819 	} else {
2820 		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2821 		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
2822 		       write_group, use_dm, *bit_chk);
2823 		debug_cond(DLEVEL == 2, "%lu => %lu", (long unsigned int)0,
2824 			(long unsigned int)(*bit_chk != 0));
2825 		return *bit_chk != 0x00;
2826 	}
2827 }
2828 
2829 /*
2830  * Center all windows. Do per-bit deskew to possibly increase the size
2831  * of certain windows.
2832  */
2833 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
2834 	uint32_t write_group, uint32_t test_bgn)
2835 {
2836 	uint32_t i, p, min_index;
2837 	int32_t d;
2838 	/*
2839 	 * Store these as signed since there are comparisons with
2840 	 * signed numbers.
2841 	 */
2842 	uint32_t bit_chk;
2843 	uint32_t sticky_bit_chk;
2844 	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2845 	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2846 	int32_t mid;
2847 	int32_t mid_min, orig_mid_min;
2848 	int32_t new_dqs, start_dqs, shift_dq;
2849 	int32_t dq_margin, dqs_margin, dm_margin;
2850 	uint32_t temp_dq_out1_delay;
2851 	uint32_t addr;
2852 
2853 	int ret;
2854 
2855 	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
2856 
2857 	dm_margin = 0;
2858 
2859 	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2860 	start_dqs = readl(addr +
2861 			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2862 
2863 	/* per-bit deskew */
2864 
2865 	/*
2866 	 * set the left and right edge of each bit to an illegal value
2867 	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2868 	 */
2869 	sticky_bit_chk = 0;
2870 	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2871 		left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
2872 		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2873 	}
2874 
2875 	/* Search for the left edge of the window for each bit */
2876 	search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
2877 			 &bit_chk, &sticky_bit_chk,
2878 			 left_edge, right_edge, 0);
2879 
2880 	/* Search for the right edge of the window for each bit */
2881 	ret = search_right_edge(1, rank_bgn, write_group, 0,
2882 				start_dqs, 0,
2883 				&bit_chk, &sticky_bit_chk,
2884 				left_edge, right_edge, 0);
2885 	if (ret) {
2886 		set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
2887 					CAL_SUBSTAGE_WRITES_CENTER);
2888 		return 0;
2889 	}
2890 
2891 	/* Find middle of window for each DQ bit */
2892 	mid_min = left_edge[0] - right_edge[0];
2893 	min_index = 0;
2894 	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2895 		mid = left_edge[i] - right_edge[i];
2896 		if (mid < mid_min) {
2897 			mid_min = mid;
2898 			min_index = i;
2899 		}
2900 	}
2901 
2902 	/*
2903 	 * -mid_min/2 represents the amount that we need to move DQS.
2904 	 * If mid_min is odd and positive we'll need to add one to
2905 	 * make sure the rounding in further calculations is correct
2906 	 * (always bias to the right), so just add 1 for all positive values.
2907 	 */
2908 	if (mid_min > 0)
2909 		mid_min++;
2910 	mid_min = mid_min / 2;
2911 	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
2912 		   __LINE__, mid_min);
2913 
2914 	/* Determine the amount we can change DQS (which is -mid_min) */
2915 	orig_mid_min = mid_min;
2916 	new_dqs = start_dqs;
2917 	mid_min = 0;
2918 	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
2919 		   __func__, __LINE__, start_dqs, new_dqs, mid_min);
2920 	/* Initialize data for export structures */
2921 	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
2922 	dq_margin  = IO_IO_OUT1_DELAY_MAX + 1;
2923 
2924 	/* add delay to bring centre of all DQ windows to the same "level" */
2925 	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
2926 		/* Use values before divide by 2 to reduce round off error */
2927 		shift_dq = (left_edge[i] - right_edge[i] -
2928 			(left_edge[min_index] - right_edge[min_index]))/2  +
2929 		(orig_mid_min - mid_min);
2930 
2931 		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq[%u]=%d\n",
2932 			   __func__, __LINE__, i, shift_dq);
2933 
2934 		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2935 		temp_dq_out1_delay = readl(addr + (i << 2));
2936 		if (shift_dq + (int32_t)temp_dq_out1_delay >
2937 			(int32_t)IO_IO_OUT1_DELAY_MAX) {
2938 			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
2939 		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
2940 			shift_dq = -(int32_t)temp_dq_out1_delay;
2941 		}
2942 		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
2943 			   i, shift_dq);
2944 		scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
2945 		scc_mgr_load_dq(i);
2946 
2947 		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
2948 			   left_edge[i] - shift_dq + (-mid_min),
2949 			   right_edge[i] + shift_dq - (-mid_min));
2950 		/* To determine values for export structures */
2951 		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2952 			dq_margin = left_edge[i] - shift_dq + (-mid_min);
2953 
2954 		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2955 			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2956 	}
2957 
2958 	/* Move DQS */
2959 	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
2960 	writel(0, &sdr_scc_mgr->update);
2961 
2962 	/* Centre DM */
2963 	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
2964 
2965 	/*
2966 	 * set the left and right edge of each bit to an illegal value,
2967 	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
2968 	 */
2969 	left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
2970 	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2971 	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2972 	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2973 	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2974 	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
2975 	int32_t win_best = 0;
2976 
2977 	/* Search for the (partial) window reachable with DM delay shifts. */
2978 	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
2979 		scc_mgr_apply_group_dm_out1_delay(d);
2980 		writel(0, &sdr_scc_mgr->update);
2981 
2982 		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2983 						    PASS_ALL_BITS, &bit_chk,
2984 						    0)) {
2985 			/* USER Set current end of the window. */
2986 			end_curr = -d;
2987 			/*
2988 			 * If a starting edge of our window has not been seen
2989 			 * this is our current start of the DM window.
2990 			 */
2991 			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2992 				bgn_curr = -d;
2993 
2994 			/*
2995 			 * If current window is bigger than best seen.
2996 			 * Set best seen to be current window.
2997 			 */
2998 			if ((end_curr - bgn_curr + 1) > win_best) {
2999 				win_best = end_curr - bgn_curr + 1;
3000 				bgn_best = bgn_curr;
3001 				end_best = end_curr;
3002 			}
3003 		} else {
3004 			/* We just saw a failing test. Reset temp edge */
3005 			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3006 			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3007 		}
3008 	}
3009 
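	/*
	 * Worked example (hypothetical run): if the write test passed for
	 * d = 10 down to d = 3, then bgn_best = -10, end_best = -3 and
	 * win_best = 8; the DQS-shift search below may extend that window
	 * on the positive side.
	 */
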
3011 	/* Reset DM delay chains to 0 */
3012 	scc_mgr_apply_group_dm_out1_delay(0);
3013 
3014 	/*
3015 	 * Check to see if the current window nudges up against 0 delay.
3016 	 * If so we need to continue the search by shifting DQS; otherwise
3017 	 * the DQS search begins as a new search. */
3018 	if (end_curr != 0) {
3019 		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3020 		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3021 	}
3022 
3023 	/* Search for the (partial) window reachable with DQS shifts. */
3024 	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
3025 		/*
3026 		 * Note: This only shifts DQS, so we may be limiting ourselves
3027 		 * to the width of DQ unnecessarily.
3028 		 */
3029 		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
3030 							d + new_dqs);
3031 
3032 		writel(0, &sdr_scc_mgr->update);
3033 		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
3034 						    PASS_ALL_BITS, &bit_chk,
3035 						    0)) {
3036 			/* USER Set current end of the window. */
3037 			end_curr = d;
3038 			/*
3039 			 * If a beginning edge of our window has not been seen
3040 			 * this is our current begin of the DM window.
3041 			 */
3042 			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
3043 				bgn_curr = d;
3044 
3045 			/*
3046 			 * If current window is bigger than best seen. Set best
3047 			 * seen to be current window.
3048 			 */
3049 			if ((end_curr - bgn_curr + 1) > win_best) {
3050 				win_best = end_curr - bgn_curr + 1;
3051 				bgn_best = bgn_curr;
3052 				end_best = end_curr;
3053 			}
3054 		} else {
3055 			/* We just saw a failing test. Reset temp edge */
3056 			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3057 			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3058 
3059 			/*
3060 			 * Early exit optimization: if the remaining delay chain
3061 			 * space is less than the largest window seen, exit.
3062 			 */
3063 			if ((win_best - 1) >
3064 			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d))
3065 				break;
3066 		}
3067 	}
3068 
3069 	/* assign left and right edge for cal and reporting; */
3070 	left_edge[0] = -bgn_best;
3071 	right_edge[0] = end_best;
3072 
3073 	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
3074 		   __LINE__, left_edge[0], right_edge[0]);
3075 
3076 	/* Move DQS (back to orig) */
3077 	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3078 
3079 	/* Move DM */
3080 
3081 	/* Find middle of window for the DM bit */
3082 	mid = (left_edge[0] - right_edge[0]) / 2;
3083 
3084 	/* only move right, since we are not moving DQS/DQ */
3085 	if (mid < 0)
3086 		mid = 0;
3087 
3088 	/* dm_margin should fail if we never find a window. */
3089 	if (win_best == 0)
3090 		dm_margin = -1;
3091 	else
3092 		dm_margin = left_edge[0] - mid;
3093 
3094 	scc_mgr_apply_group_dm_out1_delay(mid);
3095 	writel(0, &sdr_scc_mgr->update);
3096 
3097 	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3098 		   __func__, __LINE__, left_edge[0],
3099 		   right_edge[0], mid, dm_margin);
3100 	/* Export values */
3101 	gbl->fom_out += dq_margin + dqs_margin;
3102 
3103 	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3104 		   __func__, __LINE__,
3105 		   dq_margin, dqs_margin, dm_margin);
3106 
3107 	/*
3108 	 * Do not remove this line as it makes sure all of our
3109 	 * decisions have been applied.
3110 	 */
3111 	writel(0, &sdr_scc_mgr->update);
3112 	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
3113 }
3114 
3115 /**
3116  * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3117  * @rank_bgn:		Rank number
3118  * @group:		Read/Write Group
3119  * @test_bgn:		Rank at which the test begins
3120  *
3121  * Stage 2: Write Calibration Part One.
3122  *
3123  * This function implements UniPHY calibration Stage 2, as explained in
3124  * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
3125  */
3126 static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
3127 				       const u32 test_bgn)
3128 {
3129 	int ret;
3130 
3131 	/* Update info for sims */
3132 	debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3133 
3134 	reg_file_set_group(group);
3135 	reg_file_set_stage(CAL_STAGE_WRITES);
3136 	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3137 
3138 	ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
3139 	if (!ret) {
3140 		set_failing_group_stage(group, CAL_STAGE_WRITES,
3141 					CAL_SUBSTAGE_WRITES_CENTER);
3142 		return -EIO;
3143 	}
3144 
3145 	return 0;
3146 }
3147 
3148 /**
3149  * mem_precharge_and_activate() - Precharge all banks and activate
3150  *
3151  * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3152  */
3153 static void mem_precharge_and_activate(void)
3154 {
3155 	int r;
3156 
3157 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3158 		/* Test if the rank should be skipped. */
3159 		if (param->skip_ranks[r])
3160 			continue;
3161 
3162 		/* Set rank. */
3163 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3164 
3165 		/* Precharge all banks. */
3166 		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3167 					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3168 
3169 		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3170 		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3171 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
3172 
3173 		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3174 		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3175 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
3176 
3177 		/* Activate rows. */
3178 		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3179 						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3180 	}
3181 }
3182 
3183 /**
3184  * mem_init_latency() - Configure memory RLAT and WLAT settings
3185  *
3186  * Configure memory RLAT and WLAT parameters.
3187  */
3188 static void mem_init_latency(void)
3189 {
3190 	/*
3191 	 * For AV/CV, LFIFO is hardened and always runs at full rate
3192 	 * so max latency in AFI clocks, used here, is correspondingly
3193 	 * smaller.
3194 	 */
3195 	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
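	/*
	 * Example (assumed width): MAX_LATENCY_COUNT_WIDTH == 5 gives
	 * max_latency = (1 << 5) - 1 = 31 AFI clocks.
	 */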
3196 	u32 rlat, wlat;
3197 
3198 	debug("%s:%d\n", __func__, __LINE__);
3199 
3200 	/*
3201 	 * Read in write latency.
3202 	 * WL for Hard PHY does not include additive latency.
3203 	 */
3204 	wlat = readl(&data_mgr->t_wl_add);
3205 	wlat += readl(&data_mgr->mem_t_add);
3206 
3207 	gbl->rw_wl_nop_cycles = wlat - 1;
3208 
3209 	/* Read in read latency. */
3210 	rlat = readl(&data_mgr->t_rl_add);
3211 
3212 	/* Set a pretty high read latency initially. */
3213 	gbl->curr_read_lat = rlat + 16;
3214 	if (gbl->curr_read_lat > max_latency)
3215 		gbl->curr_read_lat = max_latency;
3216 
3217 	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3218 
3219 	/* Advertise write latency. */
3220 	writel(wlat, &phy_mgr_cfg->afi_wlat);
3221 }
3222 
3223 /**
3224  * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3225  *
3226  * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3227  */
3228 static void mem_skip_calibrate(void)
3229 {
3230 	uint32_t vfifo_offset;
3231 	uint32_t i, j, r;
3232 
3233 	debug("%s:%d\n", __func__, __LINE__);
3234 	/* Need to update every shadow register set used by the interface */
3235 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3236 	     r += NUM_RANKS_PER_SHADOW_REG) {
3237 		/*
3238 		 * Set output phase alignment settings appropriate for
3239 		 * skip calibration.
3240 		 */
3241 		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3242 			scc_mgr_set_dqs_en_phase(i, 0);
3243 #if IO_DLL_CHAIN_LENGTH == 6
3244 			scc_mgr_set_dqdqs_output_phase(i, 6);
3245 #else
3246 			scc_mgr_set_dqdqs_output_phase(i, 7);
3247 #endif
3248 			/*
3249 			 * Case:33398
3250 			 *
3251 			 * Write data arrives at the I/O two cycles before write
3252 			 * latency is reached (720 deg).
3253 			 *   -> due to bit-slip in a/c bus
3254 			 *   -> to allow board skew where dqs is longer than ck
3255 			 *      -> how often can this happen!?
3256 			 *      -> can claim back some ptaps for high freq
3257 			 *       support if we can relax this, but i digress...
3258 			 *
3259 			 * The write_clk leads mem_ck by 90 deg
3260 			 * The minimum ptap of the OPA is 180 deg
3261 			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
3262 			 * The write_clk is always delayed by 2 ptaps
3263 			 *
3264 			 * Hence, to make DQS aligned to CK, we need to delay
3265 			 * DQS by:
3266 			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3267 			 *
3268 			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
3269 			 * gives us the number of ptaps, which simplifies to:
3270 			 *
3271 			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3272 			 */
3273 			scc_mgr_set_dqdqs_output_phase(i,
3274 					1.25 * IO_DLL_CHAIN_LENGTH - 2);
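			/*
			 * Worked example: with IO_DLL_CHAIN_LENGTH == 8, each
			 * ptap is 45 deg, so (720 - 90 - 180 - 2 * 45) / 45 =
			 * 8 ptaps, matching 1.25 * 8 - 2.
			 */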
3275 		}
3276 		writel(0xff, &sdr_scc_mgr->dqs_ena);
3277 		writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3278 
3279 		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3280 			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3281 				  SCC_MGR_GROUP_COUNTER_OFFSET);
3282 		}
3283 		writel(0xff, &sdr_scc_mgr->dq_ena);
3284 		writel(0xff, &sdr_scc_mgr->dm_ena);
3285 		writel(0, &sdr_scc_mgr->update);
3286 	}
3287 
3288 	/* Compensate for simulation model behaviour */
3289 	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3290 		scc_mgr_set_dqs_bus_in_delay(i, 10);
3291 		scc_mgr_load_dqs(i);
3292 	}
3293 	writel(0, &sdr_scc_mgr->update);
3294 
3295 	/*
3296 	 * Arria V has hard FIFOs that can only be initialized by incrementing
3297 	 * them in the sequencer.
3298 	 */
3299 	vfifo_offset = CALIB_VFIFO_OFFSET;
3300 	for (j = 0; j < vfifo_offset; j++)
3301 		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3302 	writel(0, &phy_mgr_cmd->fifo_reset);
3303 
3304 	/*
3305 	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3306 	 * setting from a generation-time constant.
3307 	 */
3308 	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3309 	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3310 }
3311 
3312 /**
3313  * mem_calibrate() - Memory calibration entry point.
3314  *
3315  * Perform memory calibration.
3316  */
3317 static uint32_t mem_calibrate(void)
3318 {
3319 	uint32_t i;
3320 	uint32_t rank_bgn, sr;
3321 	uint32_t write_group, write_test_bgn;
3322 	uint32_t read_group, read_test_bgn;
3323 	uint32_t run_groups, current_run;
3324 	uint32_t failing_groups = 0;
3325 	uint32_t group_failed = 0;
3326 
3327 	const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
3328 				RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
3329 
3330 	debug("%s:%d\n", __func__, __LINE__);
3331 
3332 	/* Initialize the data settings */
3333 	gbl->error_substage = CAL_SUBSTAGE_NIL;
3334 	gbl->error_stage = CAL_STAGE_NIL;
3335 	gbl->error_group = 0xff;
3336 	gbl->fom_in = 0;
3337 	gbl->fom_out = 0;
3338 
3339 	/* Initialize WLAT and RLAT. */
3340 	mem_init_latency();
3341 
3342 	/* Initialize bit slips. */
3343 	mem_precharge_and_activate();
3344 
3345 	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3346 		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3347 			  SCC_MGR_GROUP_COUNTER_OFFSET);
3348 		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
3349 		if (i == 0)
3350 			scc_mgr_set_hhp_extras();
3351 
3352 		scc_set_bypass_mode(i);
3353 	}
3354 
3355 	/* Calibration is skipped. */
3356 	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3357 		/*
3358 		 * Set VFIFO and LFIFO to instant-on settings in skip
3359 		 * calibration mode.
3360 		 */
3361 		mem_skip_calibrate();
3362 
3363 		/*
3364 		 * Do not remove this line as it makes sure all of our
3365 		 * decisions have been applied.
3366 		 */
3367 		writel(0, &sdr_scc_mgr->update);
3368 		return 1;
3369 	}
3370 
3371 	/* Calibration is not skipped. */
3372 	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3373 		/*
3374 		 * Zero all delay chain/phase settings for all
3375 		 * groups and all shadow register sets.
3376 		 */
3377 		scc_mgr_zero_all();
3378 
3379 		run_groups = ~param->skip_groups;
3380 
3381 		for (write_group = 0, write_test_bgn = 0; write_group
3382 			< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3383 			write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3384 
3385 			/* Initialize the group failure */
3386 			group_failed = 0;
3387 
3388 			current_run = run_groups & ((1 <<
3389 				RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3390 			run_groups = run_groups >>
3391 				RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3392 
3393 			if (current_run == 0)
3394 				continue;
3395 
3396 			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3397 					    SCC_MGR_GROUP_COUNTER_OFFSET);
3398 			scc_mgr_zero_group(write_group, 0);
3399 
3400 			for (read_group = write_group * rwdqs_ratio,
3401 			     read_test_bgn = 0;
3402 			     read_group < (write_group + 1) * rwdqs_ratio;
3403 			     read_group++,
3404 			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3405 				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3406 					continue;
3407 
3408 				/* Calibrate the VFIFO */
3409 				if (rw_mgr_mem_calibrate_vfifo(read_group,
3410 							       read_test_bgn))
3411 					continue;
3412 
3413 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3414 					return 0;
3415 
3416 				/* The group failed, we're done. */
3417 				goto grp_failed;
3418 			}
3419 
3420 			/* Calibrate the output side */
3421 			for (rank_bgn = 0, sr = 0;
3422 			     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
3423 			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3424 				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3425 					continue;
3426 
3427 				/* Not needed in quick mode! */
3428 				if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
3429 					continue;
3430 
3431 				/*
3432 				 * Determine if this set of ranks
3433 				 * should be skipped entirely.
3434 				 */
3435 				if (param->skip_shadow_regs[sr])
3436 					continue;
3437 
3438 				/* Calibrate WRITEs */
3439 				if (!rw_mgr_mem_calibrate_writes(rank_bgn,
3440 						write_group, write_test_bgn))
3441 					continue;
3442 
3443 				group_failed = 1;
3444 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3445 					return 0;
3446 			}
3447 
3448 			/* Some group failed, we're done. */
3449 			if (group_failed)
3450 				goto grp_failed;
3451 
3452 			for (read_group = write_group * rwdqs_ratio,
3453 			     read_test_bgn = 0;
3454 			     read_group < (write_group + 1) * rwdqs_ratio;
3455 			     read_group++,
3456 			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3457 				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3458 					continue;
3459 
3460 				if (rw_mgr_mem_calibrate_vfifo_end(read_group,
3461 								read_test_bgn))
3462 					continue;
3463 
3464 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3465 					return 0;
3466 
3467 				/* The group failed, we're done. */
3468 				goto grp_failed;
3469 			}
3470 
3471 			/* No group failed, continue as usual. */
3472 			continue;
3473 
3474 grp_failed:		/* A group failed, increment the counter. */
3475 			failing_groups++;
3476 		}
3477 
3478 		/*
3479 		 * If there are any failing groups, report the failure.
3481 		 */
3482 		if (failing_groups != 0)
3483 			return 0;
3484 
3485 		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3486 			continue;
3487 
3488 		/*
3489 		 * If we're skipping groups as part of debug,
3490 		 * don't calibrate LFIFO.
3491 		 */
3492 		if (param->skip_groups != 0)
3493 			continue;
3494 
3495 		/* Calibrate the LFIFO */
3496 		if (!rw_mgr_mem_calibrate_lfifo())
3497 			return 0;
3498 	}
3499 
3500 	/*
3501 	 * Do not remove this line as it makes sure all of our decisions
3502 	 * have been applied.
3503 	 */
3504 	writel(0, &sdr_scc_mgr->update);
3505 	return 1;
3506 }
3507 
3508 /**
3509  * run_mem_calibrate() - Perform memory calibration
3510  *
3511  * This function triggers the entire memory calibration procedure.
3512  */
3513 static int run_mem_calibrate(void)
3514 {
3515 	int pass;
3516 
3517 	debug("%s:%d\n", __func__, __LINE__);
3518 
3519 	/* Reset pass/fail status shown on afi_cal_success/fail */
3520 	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3521 
3522 	/* Stop tracking manager. */
3523 	clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3524 
3525 	phy_mgr_initialize();
3526 	rw_mgr_mem_initialize();
3527 
3528 	/* Perform the actual memory calibration. */
3529 	pass = mem_calibrate();
3530 
3531 	mem_precharge_and_activate();
3532 	writel(0, &phy_mgr_cmd->fifo_reset);
3533 
3534 	/* Handoff. */
3535 	rw_mgr_mem_handoff();
3536 	/*
3537 	 * In Hard PHY this is a 2-bit control:
3538 	 * 0: AFI Mux Select
3539 	 * 1: DDIO Mux Select
3540 	 */
3541 	writel(0x2, &phy_mgr_cfg->mux_sel);
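	/*
	 * Writing 0x2 sets bit 1 (DDIO mux select) and leaves bit 0
	 * (AFI mux select) clear.
	 */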
3542 
3543 	/* Start tracking manager. */
3544 	setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3545 
3546 	return pass;
3547 }
3548 
3549 /**
3550  * debug_mem_calibrate() - Report result of memory calibration
3551  * @pass:	Value indicating whether calibration passed or failed
3552  *
3553  * This function reports the results of the memory calibration
3554  * and writes debug information into the register file.
3555  */
3556 static void debug_mem_calibrate(int pass)
3557 {
3558 	uint32_t debug_info;
3559 
3560 	if (pass) {
3561 		printf("%s: CALIBRATION PASSED\n", __FILE__);
3562 
3563 		gbl->fom_in /= 2;
3564 		gbl->fom_out /= 2;
3565 
3566 		if (gbl->fom_in > 0xff)
3567 			gbl->fom_in = 0xff;
3568 
3569 		if (gbl->fom_out > 0xff)
3570 			gbl->fom_out = 0xff;
3571 
3572 		/* Update the FOM in the register file */
3573 		debug_info = gbl->fom_in;
3574 		debug_info |= gbl->fom_out << 8;
3575 		writel(debug_info, &sdr_reg_file->fom);
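		/*
		 * Worked example: fom_in = 0x40 and fom_out = 0x55 pack
		 * into debug_info = 0x5540 (fom_in in bits [7:0], fom_out
		 * in bits [15:8]).
		 */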
3576 
3577 		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3578 		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3579 	} else {
3580 		printf("%s: CALIBRATION FAILED\n", __FILE__);
3581 
3582 		/* Update the failing group/stage in the register file. */
3583 		debug_info = gbl->error_stage;
3584 		debug_info |= gbl->error_substage << 8;
3585 		debug_info |= gbl->error_group << 16;
3586 
3587 		writel(debug_info, &sdr_reg_file->failing_stage);
3588 		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3589 		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3595 	}
3596 
3597 	printf("%s: Calibration complete\n", __FILE__);
3598 }
3599 
3600 /**
3601  * hc_initialize_rom_data() - Initialize ROM data
3602  *
3603  * Initialize ROM data.
3604  */
3605 static void hc_initialize_rom_data(void)
3606 {
3607 	u32 i, addr;
3608 
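	/*
	 * Each ROM entry is a 32-bit word, so entry i lives at byte
	 * offset (i << 2), i.e. i * 4.
	 */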
3609 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3610 	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3611 		writel(inst_rom_init[i], addr + (i << 2));
3612 
3613 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3614 	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3615 		writel(ac_rom_init[i], addr + (i << 2));
3616 }
3617 
3618 /**
3619  * initialize_reg_file() - Initialize SDR register file
3620  *
3621  * Initialize SDR register file.
3622  */
3623 static void initialize_reg_file(void)
3624 {
3625 	/* Initialize the register file with the correct data */
3626 	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3627 	writel(0, &sdr_reg_file->debug_data_addr);
3628 	writel(0, &sdr_reg_file->cur_stage);
3629 	writel(0, &sdr_reg_file->fom);
3630 	writel(0, &sdr_reg_file->failing_stage);
3631 	writel(0, &sdr_reg_file->debug1);
3632 	writel(0, &sdr_reg_file->debug2);
3633 }
3634 
3635 /**
3636  * initialize_hps_phy() - Initialize HPS PHY
3637  *
3638  * Initialize HPS PHY.
3639  */
3640 static void initialize_hps_phy(void)
3641 {
3642 	uint32_t reg;
3643 	/*
3644 	 * Tracking also gets configured here because it's in the
3645 	 * same register.
3646 	 */
3647 	uint32_t trk_sample_count = 7500;
3648 	/*
3649 	 * Format is number of outer loops in the 16 MSB, sample
3650 	 * count in the 16 LSB.
3651 	 */
3652 	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
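	/*
	 * Worked example: (10 << 16) | 100 encodes 10 outer loops and
	 * 100 samples as 0x000a0064.
	 */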
3653 
3654 	reg = 0;
3655 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3656 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3657 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3658 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3659 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3660 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3661 	/*
3662 	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3663 	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3664 	 */
3665 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3666 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3667 		trk_sample_count);
3668 	writel(reg, &sdr_ctrl->phy_ctrl0);
3669 
3670 	reg = 0;
3671 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3672 		trk_sample_count >>
3673 		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3674 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3675 		trk_long_idle_sample_count);
3676 	writel(reg, &sdr_ctrl->phy_ctrl1);
3677 
3678 	reg = 0;
3679 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3680 		trk_long_idle_sample_count >>
3681 		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3682 	writel(reg, &sdr_ctrl->phy_ctrl2);
3683 }
3684 
3685 /**
3686  * initialize_tracking() - Initialize tracking
3687  *
3688  * Initialize the register file with usable initial data.
3689  */
3690 static void initialize_tracking(void)
3691 {
3692 	/*
3693 	 * Initialize the register file with the correct data.
3694 	 * Compute a usable initial value in case the full computation
3695 	 * is skipped later.
3696 	 */
3697 	writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
3698 	       &sdr_reg_file->dtaps_per_ptap);
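	/*
	 * Worked example (tap delays are device-dependent; the values
	 * below are hypothetical): with IO_DELAY_PER_OPA_TAP = 416 ps
	 * and IO_DELAY_PER_DCHAIN_TAP = 25 ps, DIV_ROUND_UP(416, 25) = 17,
	 * so 16 is written as the dtaps-per-ptap value.
	 */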
3699 
3700 	/* trk_sample_count */
3701 	writel(7500, &sdr_reg_file->trk_sample_count);
3702 
3703 	/* longidle: outer loop count [31:16], sample count [15:0] */
3704 	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3705 
3706 	/*
3707 	 * longidle sample count [31:24]
3708 	 * trfc, worst case of 933MHz 4Gb [23:16]
3709 	 * trcd, worst case [15:8]
3710 	 * vfifo wait [7:0]
3711 	 */
3712 	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3713 	       &sdr_reg_file->delays);
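	/*
	 * Worked example: (243 << 24) | (14 << 16) | (10 << 8) | (4 << 0)
	 * packs to 0xf30e0a04.
	 */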
3714 
3715 	/* mux delay */
3716 	writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
3717 	       (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
3718 	       &sdr_reg_file->trk_rw_mgr_addr);
3719 
3720 	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
3721 	       &sdr_reg_file->trk_read_dqs_width);
3722 
3723 	/* refresh-all command [31:24], trefi in the low-order bits */
3724 	writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
3725 	       &sdr_reg_file->trk_rfsh);
3726 }
3727 
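/**
 * sdram_calibration_full() - Run the full SDRAM calibration
 *
 * Run the full memory calibration sequence and report the result.
 * Returns 1 if the calibration passed, 0 otherwise.
 */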
3728 int sdram_calibration_full(void)
3729 {
3730 	struct param_type my_param;
3731 	struct gbl_type my_gbl;
3732 	uint32_t pass;
3733 
3734 	memset(&my_param, 0, sizeof(my_param));
3735 	memset(&my_gbl, 0, sizeof(my_gbl));
3736 
3737 	param = &my_param;
3738 	gbl = &my_gbl;
3739 
3740 	/* Enable the calibration report by default. */
3741 	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3742 	/*
3743 	 * Do not sweep all groups (regardless of fail state) by default,
3744 	 * and leave the guaranteed read test enabled by default.
3745 	 */
3746 #if DISABLE_GUARANTEED_READ
3747 	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3748 #endif
3749 	/* Initialize the register file */
3750 	initialize_reg_file();
3751 
3752 	/* Initialize any PHY CSR */
3753 	initialize_hps_phy();
3754 
3755 	scc_mgr_initialize();
3756 
3757 	initialize_tracking();
3758 
3759 	printf("%s: Preparing to start memory calibration\n", __FILE__);
3760 
3761 	debug("%s:%d\n", __func__, __LINE__);
3762 	debug_cond(DLEVEL == 1,
3763 		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3764 		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3765 		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3766 		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3767 		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3768 	debug_cond(DLEVEL == 1,
3769 		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3770 		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3771 		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3772 		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
3773 	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
3774 		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3775 	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3776 		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3777 		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3778 	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3779 		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3780 		   IO_IO_OUT2_DELAY_MAX);
3781 	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3782 		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3783 
3784 	hc_initialize_rom_data();
3785 
3786 	/* Update the stage/group info used by simulation. */
3787 	reg_file_set_stage(CAL_STAGE_NIL);
3788 	reg_file_set_group(0);
3789 
3790 	/*
3791 	 * Load the global used by actions that require dynamic
3792 	 * calibration support.
3793 	 */
3794 	dyn_calib_steps = STATIC_CALIB_STEPS;
3795 	/*
3796 	 * Load global to allow dynamic selection of delay loop settings
3797 	 * based on calibration mode.
3798 	 */
3799 	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3800 		skip_delay_mask = 0xff;
3801 	else
3802 		skip_delay_mask = 0x0;
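	/*
	 * Worked example: with the mask at 0xff, a delay-loop value such as
	 * 0x2f passes through unchanged; with the mask at 0x0, the same
	 * value collapses to 0 and the delay loops are skipped.
	 */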
3803 
3804 	pass = run_mem_calibrate();
3805 	debug_mem_calibrate(pass);
3806 	return pass;
3807 }
3808