xref: /openbmc/u-boot/drivers/ddr/altera/sequencer.c (revision f085ac3b1408c33ac2e2239796b31a93a143fefa)
1 /*
2  * Copyright Altera Corporation (C) 2012-2015
3  *
4  * SPDX-License-Identifier:    BSD-3-Clause
5  */
6 
7 #include <common.h>
8 #include <asm/io.h>
9 #include <asm/arch/sdram.h>
10 #include <errno.h>
11 #include "sequencer.h"
12 
13 /*
14  * FIXME: This path is temporary until the SDRAM driver gets
15  *        a proper thorough cleanup.
16  */
17 #include "../../../board/altera/socfpga/qts/sequencer_auto.h"
18 #include "../../../board/altera/socfpga/qts/sequencer_auto_ac_init.h"
19 #include "../../../board/altera/socfpga/qts/sequencer_auto_inst_init.h"
20 #include "../../../board/altera/socfpga/qts/sequencer_defines.h"
21 
22 static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
23 	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
24 
25 static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
26 	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
27 
28 static struct socfpga_sdr_reg_file *sdr_reg_file =
29 	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
30 
31 static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
32 	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
33 
34 static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
35 	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
36 
37 static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
38 	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
39 
40 static struct socfpga_data_mgr *data_mgr =
41 	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
42 
43 static struct socfpga_sdr_ctrl *sdr_ctrl =
44 	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
45 
46 #define DELTA_D		1
47 
48 /*
49  * In order to reduce ROM size, most of the selectable calibration steps are
50  * decided at compile time based on the user's calibration mode selection,
51  * as captured by the STATIC_CALIB_STEPS selection below.
52  *
53  * However, to support simulation-time selection of fast simulation mode, where
54  * we skip everything except the bare minimum, we need a few of the steps to
55  * be dynamic.  In those cases, we either use the DYNAMIC_CALIB_STEPS for the
56  * check, which is based on the rtl-supplied value, or we dynamically compute
57  * the value to use based on the dynamically-chosen calibration mode
58  */
59 
60 #define DLEVEL 0
61 #define STATIC_IN_RTL_SIM 0
62 #define STATIC_SKIP_DELAY_LOOPS 0
63 
64 #define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
65 	STATIC_SKIP_DELAY_LOOPS)
66 
67 /* calibration steps requested by the rtl */
68 uint16_t dyn_calib_steps;
69 
70 /*
71  * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
72  * instead of static, we use boolean logic to select between
73  * non-skip and skip values
74  *
75  * The mask is set to include all bits when not-skipping, but is
76  * zero when skipping
77  */
78 
79 uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */
80 
81 #define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
82 	((non_skip_value) & skip_delay_mask)
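/*
 * For example, with skip_delay_mask == 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff)
 * evaluates to 0 and the delay loops collapse; with the mask at 0xffff, the
 * non-skip value passes through unchanged.
 */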
83 
84 struct gbl_type *gbl;
85 struct param_type *param;
86 
87 static void set_failing_group_stage(uint32_t group, uint32_t stage,
88 	uint32_t substage)
89 {
90 	/*
91 	 * Only set the global stage if there has not been any other
92 	 * failing group.
93 	 */
94 	if (gbl->error_stage == CAL_STAGE_NIL) {
95 		gbl->error_substage = substage;
96 		gbl->error_stage = stage;
97 		gbl->error_group = group;
98 	}
99 }
100 
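/*
 * The three helpers below pack calibration progress into the register
 * file's cur_stage word: group in bits [31:16], substage in bits [15:8],
 * stage in bits [7:0]. Note that reg_file_set_stage() masks off all of
 * bits [15:0], so it clears the substage field as well.
 */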
101 static void reg_file_set_group(u16 set_group)
102 {
103 	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
104 }
105 
106 static void reg_file_set_stage(u8 set_stage)
107 {
108 	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
109 }
110 
111 static void reg_file_set_sub_stage(u8 set_sub_stage)
112 {
113 	set_sub_stage &= 0xff;
114 	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
115 }
116 
117 /**
118  * phy_mgr_initialize() - Initialize PHY Manager
119  *
120  * Initialize PHY Manager.
121  */
122 static void phy_mgr_initialize(void)
123 {
124 	u32 ratio;
125 
126 	debug("%s:%d\n", __func__, __LINE__);
127 	/* Calibration has control over path to memory */
128 	/*
129 	 * In Hard PHY this is a 2-bit control:
130 	 * 0: AFI Mux Select
131 	 * 1: DDIO Mux Select
132 	 */
133 	writel(0x3, &phy_mgr_cfg->mux_sel);
134 
135 	/* Memory clock is not yet stable as we begin initialization. */
136 	writel(0, &phy_mgr_cfg->reset_mem_stbl);
137 
138 	/* Clear all calibration status bits. */
139 	writel(0, &phy_mgr_cfg->cal_status);
140 
141 	writel(0, &phy_mgr_cfg->cal_debug_info);
142 
143 	/* Init params only if we do NOT skip calibration. */
144 	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
145 		return;
146 
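	/*
	 * Example (illustrative only; the actual values come from the
	 * QTS-generated headers): with 8 DQ per read DQS and 1 virtual
	 * group per DQS, ratio == 8 and the VG masks below are 0xff,
	 * i.e. one mask bit per DQ pin.
	 */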
147 	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
148 		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
149 	param->read_correct_mask_vg = (1 << ratio) - 1;
150 	param->write_correct_mask_vg = (1 << ratio) - 1;
151 	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
152 	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
153 }
154 
155 /**
156  * set_rank_and_odt_mask() - Set Rank and ODT mask
157  * @rank:	Rank mask
158  * @odt_mode:	ODT mode, OFF or READ_WRITE
159  *
160  * Set Rank and ODT mask (On-Die Termination).
161  */
162 static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
163 {
164 	u32 odt_mask_0 = 0;
165 	u32 odt_mask_1 = 0;
166 	u32 cs_and_odt_mask;
167 
168 	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
169 		odt_mask_0 = 0x0;
170 		odt_mask_1 = 0x0;
171 	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
172 		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
173 		case 1:	/* 1 Rank */
174 			/* Read: ODT = 0 ; Write: ODT = 1 */
175 			odt_mask_0 = 0x0;
176 			odt_mask_1 = 0x1;
177 			break;
178 		case 2:	/* 2 Ranks */
179 			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
180 				/*
181 				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
182 				 *   OR
183 				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
184 				 *
185 				 * Since MEM_NUMBER_OF_RANKS is 2, they
186 				 * are both single rank with 2 CS each
187 				 * (special for RDIMM).
188 				 *
189 				 * Read: Turn on ODT on the opposite rank
190 				 * Write: Turn on ODT on all ranks
191 				 */
192 				odt_mask_0 = 0x3 & ~(1 << rank);
193 				odt_mask_1 = 0x3;
194 			} else {
195 				/*
196 				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
197 				 *
198 				 * Read: Turn ODT off on all ranks
199 				 * Write: Turn on ODT on active rank
200 				 */
201 				odt_mask_0 = 0x0;
202 				odt_mask_1 = 0x3 & (1 << rank);
203 			}
204 			break;
205 		case 4:	/* 4 Ranks */
206 			/* Read:
207 			 * ----------+-----------------------+
208 			 *           |         ODT           |
209 			 * Read From +-----------------------+
210 			 *   Rank    |  3  |  2  |  1  |  0  |
211 			 * ----------+-----+-----+-----+-----+
212 			 *     0     |  0  |  1  |  0  |  0  |
213 			 *     1     |  1  |  0  |  0  |  0  |
214 			 *     2     |  0  |  0  |  0  |  1  |
215 			 *     3     |  0  |  0  |  1  |  0  |
216 			 * ----------+-----+-----+-----+-----+
217 			 *
218 			 * Write:
219 			 * ----------+-----------------------+
220 			 *           |         ODT           |
221 			 * Write To  +-----------------------+
222 			 *   Rank    |  3  |  2  |  1  |  0  |
223 			 * ----------+-----+-----+-----+-----+
224 			 *     0     |  0  |  1  |  0  |  1  |
225 			 *     1     |  1  |  0  |  1  |  0  |
226 			 *     2     |  0  |  1  |  0  |  1  |
227 			 *     3     |  1  |  0  |  1  |  0  |
228 			 * ----------+-----+-----+-----+-----+
229 			 */
230 			switch (rank) {
231 			case 0:
232 				odt_mask_0 = 0x4;
233 				odt_mask_1 = 0x5;
234 				break;
235 			case 1:
236 				odt_mask_0 = 0x8;
237 				odt_mask_1 = 0xA;
238 				break;
239 			case 2:
240 				odt_mask_0 = 0x1;
241 				odt_mask_1 = 0x5;
242 				break;
243 			case 3:
244 				odt_mask_0 = 0x2;
245 				odt_mask_1 = 0xA;
246 				break;
247 			}
248 			break;
249 		}
250 	}
251 
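	/*
	 * Pack the RW manager control word: chip-selects in bits [7:0]
	 * (inverted, so apparently active-low), odt_mask_0 (reads) in
	 * bits [15:8], odt_mask_1 (writes) in bits [23:16].
	 */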
252 	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
253 			  ((0xFF & odt_mask_0) << 8) |
254 			  ((0xFF & odt_mask_1) << 16);
255 	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
256 				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
257 }
258 
259 /**
260  * scc_mgr_set() - Set SCC Manager register
261  * @off:	Base offset in SCC Manager space
262  * @grp:	Read/Write group
263  * @val:	Value to be set
264  *
265  * This function sets the SCC Manager (Scan Chain Control Manager) register.
266  */
267 static void scc_mgr_set(u32 off, u32 grp, u32 val)
268 {
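	/*
	 * Each per-group register is one 32-bit word, hence the
	 * (grp << 2) byte offset.
	 */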
269 	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
270 }
271 
272 /**
273  * scc_mgr_initialize() - Initialize SCC Manager registers
274  *
275  * Initialize SCC Manager registers.
276  */
277 static void scc_mgr_initialize(void)
278 {
279 	/*
280 	 * Clear register file for HPS. 16 (2^4) is the size of the
281 	 * full register file in the scc mgr:
282 	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
283 	 *                             MEM_IF_READ_DQS_WIDTH - 1);
284 	 */
285 	int i;
286 
287 	for (i = 0; i < 16; i++) {
288 		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
289 			   __func__, __LINE__, i);
290 		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
291 	}
292 }
293 
294 static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
295 {
296 	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
297 }
298 
299 static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
300 {
301 	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
302 }
303 
304 static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
305 {
306 	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
307 }
308 
309 static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
310 {
311 	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
312 }
313 
314 static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
315 {
316 	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
317 		    delay);
318 }
319 
320 static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
321 {
322 	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
323 }
324 
325 static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
326 {
327 	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
328 }
329 
330 static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
331 {
332 	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
333 		    delay);
334 }
335 
336 static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
337 {
338 	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
339 		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
340 		    delay);
341 }
342 
343 /* load up dqs config settings */
344 static void scc_mgr_load_dqs(uint32_t dqs)
345 {
346 	writel(dqs, &sdr_scc_mgr->dqs_ena);
347 }
348 
349 /* load up dqs io config settings */
350 static void scc_mgr_load_dqs_io(void)
351 {
352 	writel(0, &sdr_scc_mgr->dqs_io_ena);
353 }
354 
355 /* load up dq config settings */
356 static void scc_mgr_load_dq(uint32_t dq_in_group)
357 {
358 	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
359 }
360 
361 /* load up dm config settings */
362 static void scc_mgr_load_dm(uint32_t dm)
363 {
364 	writel(dm, &sdr_scc_mgr->dm_ena);
365 }
366 
367 /**
368  * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
369  * @off:	Base offset in SCC Manager space
370  * @grp:	Read/Write group
371  * @val:	Value to be set
372  * @update:	If non-zero, trigger SCC Manager update for all ranks
373  *
374  * This function sets the SCC Manager (Scan Chain Control Manager) register
375  * and optionally triggers the SCC update for all ranks.
376  */
377 static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
378 				  const int update)
379 {
380 	u32 r;
381 
382 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
383 	     r += NUM_RANKS_PER_SHADOW_REG) {
384 		scc_mgr_set(off, grp, val);
385 
386 		if (update || (r == 0)) {
387 			writel(grp, &sdr_scc_mgr->dqs_ena);
388 			writel(0, &sdr_scc_mgr->update);
389 		}
390 	}
391 }
392 
393 static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
394 {
395 	/*
396 	 * Although the h/w doesn't support different phases per
397 	 * shadow register, for simplicity our SCC manager modeling
398 	 * keeps different phase settings per shadow reg, and it's
399 	 * important for us to keep them in sync to match h/w.
400 	 * For efficiency, the scan chain update should occur only
401 	 * once to sr0.
402 	 */
403 	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
404 			      read_group, phase, 0);
405 }
406 
407 static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
408 						     uint32_t phase)
409 {
410 	/*
411 	 * Although the h/w doesn't support different phases per
412 	 * shadow register, for simplicity our SCC manager modeling
413 	 * keeps different phase settings per shadow reg, and it's
414 	 * important for us to keep them in sync to match h/w.
415 	 * For efficiency, the scan chain update should occur only
416 	 * once to sr0.
417 	 */
418 	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
419 			      write_group, phase, 0);
420 }
421 
422 static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
423 					       uint32_t delay)
424 {
425 	/*
426 	 * In shadow register mode, the T11 settings are stored in
427 	 * registers in the core, which are updated by the DQS_ENA
428 	 * signals. Not issuing the SCC_MGR_UPD command allows us to
429 	 * save lots of rank switching overhead, by calling
430 	 * select_shadow_regs_for_update with update_scan_chains
431 	 * set to 0.
432 	 */
433 	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
434 			      read_group, delay, 1);
435 	writel(0, &sdr_scc_mgr->update);
436 }
437 
438 /**
439  * scc_mgr_set_oct_out1_delay() - Set OCT output delay
440  * @write_group:	Write group
441  * @delay:		Delay value
442  *
443  * This function sets the OCT output delay in SCC manager.
444  */
445 static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
446 {
447 	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
448 			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
449 	const int base = write_group * ratio;
450 	int i;
451 	/*
452 	 * Load the setting in the SCC manager
453 	 * Although OCT affects only write data, the OCT delay is controlled
454 	 * by the DQS logic block which is instantiated once per read group.
455 	 * For protocols where a write group consists of multiple read groups,
456 	 * the setting must be set multiple times.
457 	 */
458 	for (i = 0; i < ratio; i++)
459 		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
460 }
461 
462 /**
463  * scc_mgr_set_hhp_extras() - Set HHP extras.
464  *
465  * Load the fixed setting in the SCC manager HHP extras.
466  */
467 static void scc_mgr_set_hhp_extras(void)
468 {
469 	/*
470 	 * Load the fixed setting in the SCC manager
471 	 * bits: 0:0 = 1'b1	- DQS bypass
472 	 * bits: 1:1 = 1'b1	- DQ bypass
473 	 * bits: 4:2 = 3'b001	- rfifo_mode
474 	 * bits: 6:5 = 2'b01	- rfifo clock_select
475 	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
476 	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
477 	 */
478 	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
479 			  (1 << 2) | (1 << 1) | (1 << 0);
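	/* With the field encoding above, value works out to 0x27. */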
480 	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
481 			 SCC_MGR_HHP_GLOBALS_OFFSET |
482 			 SCC_MGR_HHP_EXTRAS_OFFSET;
483 
484 	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
485 		   __func__, __LINE__);
486 	writel(value, addr);
487 	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
488 		   __func__, __LINE__);
489 }
490 
491 /**
492  * scc_mgr_zero_all() - Zero all DQS config
493  *
494  * Zero all DQS config.
495  */
496 static void scc_mgr_zero_all(void)
497 {
498 	int i, r;
499 
500 	/*
501 	 * Zero all DQS config settings, across all groups and all
502 	 * shadow registers.
503 	 */
504 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
505 	     r += NUM_RANKS_PER_SHADOW_REG) {
506 		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
507 			/*
508 			 * The phases actually don't exist on a per-rank basis,
509 			 * but there's no harm updating them several times, so
510 			 * let's keep the code simple.
511 			 */
512 			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
513 			scc_mgr_set_dqs_en_phase(i, 0);
514 			scc_mgr_set_dqs_en_delay(i, 0);
515 		}
516 
517 		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
518 			scc_mgr_set_dqdqs_output_phase(i, 0);
519 			/* Arria V/Cyclone V don't have out2. */
520 			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
521 		}
522 	}
523 
524 	/* Multicast to all DQS group enables. */
525 	writel(0xff, &sdr_scc_mgr->dqs_ena);
526 	writel(0, &sdr_scc_mgr->update);
527 }
528 
529 /**
530  * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
531  * @write_group:	Write group
532  *
533  * Set bypass mode and trigger SCC update.
534  */
535 static void scc_set_bypass_mode(const u32 write_group)
536 {
537 	/* Multicast to all DQ enables. */
538 	writel(0xff, &sdr_scc_mgr->dq_ena);
539 	writel(0xff, &sdr_scc_mgr->dm_ena);
540 
541 	/* Update current DQS IO enable. */
542 	writel(0, &sdr_scc_mgr->dqs_io_ena);
543 
544 	/* Update the DQS logic. */
545 	writel(write_group, &sdr_scc_mgr->dqs_ena);
546 
547 	/* Hit update. */
548 	writel(0, &sdr_scc_mgr->update);
549 }
550 
551 /**
552  * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
553  * @write_group:	Write group
554  *
555  * Load DQS settings for Write Group, do not trigger SCC update.
556  */
557 static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
558 {
559 	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
560 			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
561 	const int base = write_group * ratio;
562 	int i;
563 	/*
564 	 * Load the setting in the SCC manager
565 	 * Although OCT affects only write data, the OCT delay is controlled
566 	 * by the DQS logic block which is instantiated once per read group.
567 	 * For protocols where a write group consists of multiple read groups,
568 	 * the setting must be set multiple times.
569 	 */
570 	for (i = 0; i < ratio; i++)
571 		writel(base + i, &sdr_scc_mgr->dqs_ena);
572 }
573 
574 /**
575  * scc_mgr_zero_group() - Zero all configs for a group
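 * @write_group:	Write group
 * @out_only:		If set, zero only the output-side settings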
576  *
577  * Zero DQ, DM, DQS and OCT configs for a group.
578  */
579 static void scc_mgr_zero_group(const u32 write_group, const int out_only)
580 {
581 	int i, r;
582 
583 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
584 	     r += NUM_RANKS_PER_SHADOW_REG) {
585 		/* Zero all DQ config settings. */
586 		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
587 			scc_mgr_set_dq_out1_delay(i, 0);
588 			if (!out_only)
589 				scc_mgr_set_dq_in_delay(i, 0);
590 		}
591 
592 		/* Multicast to all DQ enables. */
593 		writel(0xff, &sdr_scc_mgr->dq_ena);
594 
595 		/* Zero all DM config settings. */
596 		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
597 			scc_mgr_set_dm_out1_delay(i, 0);
598 
599 		/* Multicast to all DM enables. */
600 		writel(0xff, &sdr_scc_mgr->dm_ena);
601 
602 		/* Zero all DQS IO settings. */
603 		if (!out_only)
604 			scc_mgr_set_dqs_io_in_delay(0);
605 
606 		/* Arria V/Cyclone V don't have out2. */
607 		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
608 		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
609 		scc_mgr_load_dqs_for_write_group(write_group);
610 
611 		/* Multicast to all DQS IO enables (only 1 in total). */
612 		writel(0, &sdr_scc_mgr->dqs_io_ena);
613 
614 		/* Hit update to zero everything. */
615 		writel(0, &sdr_scc_mgr->update);
616 	}
617 }
618 
619 /*
620  * apply and load a particular input delay for the DQ pins in a group
621  * group_bgn is the index of the first dq pin (in the write group)
622  */
623 static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
624 {
625 	uint32_t i, p;
626 
627 	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
628 		scc_mgr_set_dq_in_delay(p, delay);
629 		scc_mgr_load_dq(p);
630 	}
631 }
632 
633 /**
634  * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
635  * @delay:		Delay value
636  *
637  * Apply and load a particular output delay for the DQ pins in a group.
638  */
639 static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
640 {
641 	int i;
642 
643 	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
644 		scc_mgr_set_dq_out1_delay(i, delay);
645 		scc_mgr_load_dq(i);
646 	}
647 }
648 
649 /* apply and load a particular output delay for the DM pins in a group */
650 static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
651 {
652 	uint32_t i;
653 
654 	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
655 		scc_mgr_set_dm_out1_delay(i, delay1);
656 		scc_mgr_load_dm(i);
657 	}
658 }
659 
660 
661 /* apply and load delay on both DQS and OCT out1 */
662 static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
663 						    uint32_t delay)
664 {
665 	scc_mgr_set_dqs_out1_delay(delay);
666 	scc_mgr_load_dqs_io();
667 
668 	scc_mgr_set_oct_out1_delay(write_group, delay);
669 	scc_mgr_load_dqs_for_write_group(write_group);
670 }
671 
672 /**
673  * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
674  * @write_group:	Write group
675  * @delay:		Delay value
676  *
677  * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
678  */
679 static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
680 						  const u32 delay)
681 {
682 	u32 i, new_delay;
683 
684 	/* DQ shift */
685 	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
686 		scc_mgr_load_dq(i);
687 
688 	/* DM shift */
689 	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
690 		scc_mgr_load_dm(i);
691 
692 	/* DQS shift */
693 	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
694 	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
695 		debug_cond(DLEVEL == 1,
696 			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
697 			   __func__, __LINE__, write_group, delay, new_delay,
698 			   IO_IO_OUT2_DELAY_MAX,
699 			   new_delay - IO_IO_OUT2_DELAY_MAX);
700 		new_delay -= IO_IO_OUT2_DELAY_MAX;
701 		scc_mgr_set_dqs_out1_delay(new_delay);
702 	}
703 
704 	scc_mgr_load_dqs_io();
705 
706 	/* OCT shift */
707 	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
708 	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
709 		debug_cond(DLEVEL == 1,
710 			   "%s:%d (%u, %u) OCT: %u > %d; adding %u to OUT1\n",
711 			   __func__, __LINE__, write_group, delay,
712 			   new_delay, IO_IO_OUT2_DELAY_MAX,
713 			   new_delay - IO_IO_OUT2_DELAY_MAX);
714 		new_delay -= IO_IO_OUT2_DELAY_MAX;
715 		scc_mgr_set_oct_out1_delay(write_group, new_delay);
716 	}
717 
718 	scc_mgr_load_dqs_for_write_group(write_group);
719 }
720 
721 /**
722  * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
723  * @write_group:	Write group
724  * @delay:		Delay value
725  *
726  * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
727  */
728 static void
729 scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
730 						const u32 delay)
731 {
732 	int r;
733 
734 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
735 	     r += NUM_RANKS_PER_SHADOW_REG) {
736 		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
737 		writel(0, &sdr_scc_mgr->update);
738 	}
739 }
740 
741 /**
742  * set_jump_as_return() - Return instruction optimization
743  *
744  * Optimization used to recover some slots in ddr3 inst_rom could be
745  * Optimization used to recover some slots in the DDR3 inst_rom; it could
746  * be applied to other protocols if we wanted to.
747 static void set_jump_as_return(void)
748 {
749 	/*
750 	 * To save space, we replace return with a jump to the special shared
751 	 * RETURN instruction, and set the counter to a large value so that
752 	 * the jump is always taken.
753 	 */
754 	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
755 	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
756 }
757 
758 /**
759  * delay_for_n_mem_clocks() - Delay for N memory clocks
760  * @clocks:	Length of the delay
761  *
762  * Delay for N memory clocks.
763  */
764 static void delay_for_n_mem_clocks(const u32 clocks)
765 {
766 	u32 afi_clocks;
767 	u16 c_loop;
768 	u8 inner;
769 	u8 outer;
770 
771 	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
772 
773 	/* Scale (rounding up) to get afi clocks. */
774 	afi_clocks = DIV_ROUND_UP(clocks, AFI_RATE_RATIO);
775 	if (afi_clocks)	/* Temporary underflow protection */
776 		afi_clocks--;
777 
778 	/*
779 	 * Note, we don't bother accounting for being off a little
780 	 * bit because of a few extra instructions in outer loops.
781 	 * Note, the loops have a test at the end, and do the test
782 	 * before the decrement, so they always run one more time
783 	 * than the counter value.
784 	 */
785 	c_loop = afi_clocks >> 16;
786 	outer = c_loop ? 0xff : (afi_clocks >> 8);
787 	inner = outer ? 0xff : afi_clocks;
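	/*
	 * Worked example: afi_clocks == 0x12345 gives c_loop == 1, so both
	 * outer and inner saturate at 0xff and the nested-loop path below
	 * executes c_loop + 1 times.
	 */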
788 
789 	/*
790 	 * rom instructions are structured as follows:
791 	 *
792 	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
793 	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
794 	 *                return
795 	 *
796 	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
797 	 * TARGET_B is set to IDLE_LOOP2 as well
798 	 *
799 	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
800 	 * and set TARGET_B to IDLE_LOOP1, skipping IDLE_LOOP2 entirely
801 	 *
802 	 * a little confusing, but it helps save precious space in the inst_rom
803 	 * and sequencer rom and keeps the delays more accurate and reduces
804 	 * overhead
805 	 */
806 	if (afi_clocks < 0x100) {
807 		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
808 			&sdr_rw_load_mgr_regs->load_cntr1);
809 
810 		writel(RW_MGR_IDLE_LOOP1,
811 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
812 
813 		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
814 					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
815 	} else {
816 		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
817 			&sdr_rw_load_mgr_regs->load_cntr0);
818 
819 		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
820 			&sdr_rw_load_mgr_regs->load_cntr1);
821 
822 		writel(RW_MGR_IDLE_LOOP2,
823 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
824 
825 		writel(RW_MGR_IDLE_LOOP2,
826 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
827 
828 		do {
829 			writel(RW_MGR_IDLE_LOOP2,
830 				SDR_PHYGRP_RWMGRGRP_ADDRESS |
831 				RW_MGR_RUN_SINGLE_GROUP_OFFSET);
832 		} while (c_loop-- != 0);
833 	}
834 	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
835 }
836 
837 /**
838  * rw_mgr_mem_init_load_regs() - Load instruction registers
839  * @cntr0:	Counter 0 value
840  * @cntr1:	Counter 1 value
841  * @cntr2:	Counter 2 value
842  * @jump:	Jump instruction value
843  *
844  * Load instruction registers.
845  */
846 static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
847 {
848 	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
849 			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;
850 
851 	/* Load counters */
852 	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
853 	       &sdr_rw_load_mgr_regs->load_cntr0);
854 	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
855 	       &sdr_rw_load_mgr_regs->load_cntr1);
856 	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
857 	       &sdr_rw_load_mgr_regs->load_cntr2);
858 
859 	/* Load jump address */
860 	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
861 	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
862 	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
863 
864 	/* Execute count instruction */
865 	writel(jump, grpaddr);
866 }
867 
868 /**
869  * rw_mgr_mem_load_user() - Load user calibration values
870  * @fin1:	Final instruction 1
871  * @fin2:	Final instruction 2
872  * @precharge:	If 1, precharge the banks at the end
873  *
874  * Load user calibration values and optionally precharge the banks.
875  */
876 static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
877 				 const int precharge)
878 {
879 	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
880 		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
881 	u32 r;
882 
883 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
884 		/* set rank */
885 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
886 
887 		/* precharge all banks ... */
888 		if (precharge)
889 			writel(RW_MGR_PRECHARGE_ALL, grpaddr);
890 
891 		/*
892 		 * Use mirrored commands for odd ranks if address
893 		 * mirroring is on.
894 		 */
895 		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
896 			set_jump_as_return();
897 			writel(RW_MGR_MRS2_MIRR, grpaddr);
898 			delay_for_n_mem_clocks(4);
899 			set_jump_as_return();
900 			writel(RW_MGR_MRS3_MIRR, grpaddr);
901 			delay_for_n_mem_clocks(4);
902 			set_jump_as_return();
903 			writel(RW_MGR_MRS1_MIRR, grpaddr);
904 			delay_for_n_mem_clocks(4);
905 			set_jump_as_return();
906 			writel(fin1, grpaddr);
907 		} else {
908 			set_jump_as_return();
909 			writel(RW_MGR_MRS2, grpaddr);
910 			delay_for_n_mem_clocks(4);
911 			set_jump_as_return();
912 			writel(RW_MGR_MRS3, grpaddr);
913 			delay_for_n_mem_clocks(4);
914 			set_jump_as_return();
915 			writel(RW_MGR_MRS1, grpaddr);
916 			set_jump_as_return();
917 			writel(fin2, grpaddr);
918 		}
919 
920 		if (precharge)
921 			continue;
922 
923 		set_jump_as_return();
924 		writel(RW_MGR_ZQCL, grpaddr);
925 
926 		/* tZQinit = tDLLK = 512 ck cycles */
927 		delay_for_n_mem_clocks(512);
928 	}
929 }
930 
931 /**
932  * rw_mgr_mem_initialize() - Initialize RW Manager
933  *
934  * Initialize RW Manager.
935  */
936 static void rw_mgr_mem_initialize(void)
937 {
938 	debug("%s:%d\n", __func__, __LINE__);
939 
940 	/* The reset / cke part of initialization is broadcasted to all ranks */
941 	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
942 				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
943 
944 	/*
945 	 * Here's how you load registers for a loop:
946 	 * Counters are located @ 0x800
947 	 * Jump address are located @ 0xC00
948 	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
949 	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
950 	 * I know this ain't pretty, but the Avalon bus throws away the 2 least
951 	 * significant bits.
952 	 */
953 
954 	/* Start with memory RESET activated */
955 
956 	/* tINIT = 200us */
957 
958 	/*
959 	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
960 	 * If a and b are the numbers of iterations of the 2 nested loops,
961 	 * it takes the following number of cycles to complete the operation:
962 	 * number_of_cycles = ((2 + n) * a + 2) * b
963 	 * where n is the number of instructions in the inner loop.
964 	 * One possible solution is n = 0, a = 256, b = 106 => a = FF,
965 	 * b = 6A
966 	 */
967 	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
968 				  SEQ_TINIT_CNTR2_VAL,
969 				  RW_MGR_INIT_RESET_0_CKE_0);
970 
971 	/* Indicate that memory is stable. */
972 	writel(1, &phy_mgr_cfg->reset_mem_stbl);
973 
974 	/*
975 	 * transition the RESET to high
976 	 * Wait for 500us
977 	 */
978 
979 	/*
980 	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
981 	 * If a and b are the numbers of iterations of the 2 nested loops,
982 	 * it takes the following number of cycles to complete the operation:
983 	 * number_of_cycles = ((2 + n) * a + 2) * b
984 	 * where n is the number of instructions in the inner loop.
985 	 * One possible solution is n = 2, a = 131, b = 256 => a = 83,
986 	 * b = FF
987 	 */
988 	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
989 				  SEQ_TRESET_CNTR2_VAL,
990 				  RW_MGR_INIT_RESET_1_CKE_0);
991 
992 	/* Bring up clock enable. */
993 
994 	/* tXRP < 250 ck cycles */
995 	delay_for_n_mem_clocks(250);
996 
997 	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
998 			     0);
999 }
1000 
1001 /**
1002  * rw_mgr_mem_handoff() - Hand off the memory to user
1003  *
1004  * At the end of calibration we have to program the user settings in
1005  * and hand off the memory to the user.
1006  */
1007 static void rw_mgr_mem_handoff(void)
1008 {
1009 	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
1010 	/*
1011 	 * Need to wait tMOD (12CK or 15ns) time before issuing other
1012 	 * commands, but we will have plenty of NIOS cycles before actual
1013 	 * handoff, so it's okay.
1014 	 */
1015 }
1016 
1017 /**
1018  * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
1019  * @group:	Write Group
1020  * @test_dm:	Test DM (data mask) functionality
1021  *
1022  * Issue write test command. Two variants are provided, one that just tests
1023  * a write pattern and another that tests datamask functionality.
1024  */
1025 static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
1026 						  u32 test_dm)
1027 {
1028 	const u32 quick_write_mode =
1029 		(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
1030 		ENABLE_SUPER_QUICK_CALIBRATION;
1031 	u32 mcc_instruction;
1032 	u32 rw_wl_nop_cycles;
1033 
1034 	/*
1035 	 * Set counter and jump addresses for the right
1036 	 * number of NOP cycles.
1037 	 * The number of supported NOP cycles can range from -1 to infinity
1038 	 * Three different cases are handled:
1039 	 *
1040 	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
1041 	 *    mechanism will be used to insert the right number of NOPs
1042 	 *
1043 	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
1044 	 *    issuing the write command will jump straight to the
1045 	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
1046 	 *    data (for RLD), skipping
1047 	 *    the NOP micro-instruction all together
1048 	 *
1049 	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
1050 	 *    turned on in the same micro-instruction that issues the write
1051 	 *    command. Then we need
1052 	 *    to directly jump to the micro-instruction that sends out the data
1053 	 *
1054 	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
1055 	 *       (2 and 3). One jump-counter (0) is used to perform multiple
1056 	 *       write-read operations.
1057 	 *       One counter is left to issue this command in "multiple-group" mode.
1058 	 */
1059 
1060 	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
1061 
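	/*
	 * Note: rw_wl_nop_cycles is u32, so the -1 case below is matched via
	 * the usual arithmetic conversion (-1 becomes 0xffffffff).
	 */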
1062 	if (rw_wl_nop_cycles == -1) {
1063 		/*
1064 		 * CNTR 2 - We want to execute the special write operation that
1065 		 * turns on DQS right away and then skip directly to the
1066 		 * instruction that sends out the data. We set the counter to a
1067 		 * large number so that the jump is always taken.
1068 		 */
1069 		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1070 
1071 		/* CNTR 3 - Not used */
1072 		if (test_dm) {
1073 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
1074 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
1075 			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1076 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
1077 			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1078 		} else {
1079 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
1080 			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
1081 				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
1082 			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
1083 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
1084 		}
1085 	} else if (rw_wl_nop_cycles == 0) {
1086 		/*
1087 		 * CNTR 2 - We want to skip the NOP operation and go straight
1088 		 * to the DQS enable instruction. We set the counter to a large
1089 		 * number so that the jump is always taken.
1090 		 */
1091 		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1092 
1093 		/* CNTR 3 - Not used */
1094 		if (test_dm) {
1095 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
1096 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
1097 			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1098 		} else {
1099 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
1100 			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
1101 				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
1102 		}
1103 	} else {
1104 		/*
1105 		 * CNTR 2 - In this case we want to execute the next instruction
1106 		 * and NOT take the jump. So we set the counter to 0. The jump
1107 	 * address doesn't matter.
1108 		 */
1109 		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
1110 		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1111 
1112 		/*
1113 		 * CNTR 3 - Set the nop counter to the number of cycles we
1114 		 * need to loop for, minus 1.
1115 		 */
1116 		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
1117 		if (test_dm) {
1118 			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
1119 			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
1120 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
1121 		} else {
1122 			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
1123 			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
1124 				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
1125 		}
1126 	}
1127 
1128 	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1129 		  RW_MGR_RESET_READ_DATAPATH_OFFSET);
1130 
1131 	if (quick_write_mode)
1132 		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
1133 	else
1134 		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
1135 
1136 	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1137 
1138 	/*
1139 	 * CNTR 1 - This is used to ensure enough time elapses
1140 	 * for read data to come back.
1141 	 */
1142 	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
1143 
1144 	if (test_dm) {
1145 		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
1146 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
1147 	} else {
1148 		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
1149 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
1150 	}
1151 
1152 	writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
1153 				RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
1154 				(group << 2));
1155 }
1156 
1157 /**
1158  * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
1159  * @rank_bgn:		Rank number
1160  * @write_group:	Write Group
1161  * @use_dm:		Use DM
1162  * @all_correct:	All bits must be correct in the mask
1163  * @bit_chk:		Resulting bit mask after the test
1164  * @all_ranks:		Test all ranks
1165  *
1166  * Test writes, can check for a single bit pass or multiple bit pass.
1167  */
1168 static int
1169 rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
1170 				const u32 use_dm, const u32 all_correct,
1171 				u32 *bit_chk, const u32 all_ranks)
1172 {
1173 	const u32 rank_end = all_ranks ?
1174 				RW_MGR_MEM_NUMBER_OF_RANKS :
1175 				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1176 	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_WRITE_DQS /
1177 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS;
1178 	const u32 correct_mask_vg = param->write_correct_mask_vg;
1179 
1180 	u32 tmp_bit_chk, base_rw_mgr;
1181 	int vg, r;
1182 
1183 	*bit_chk = param->write_correct_mask;
1184 
1185 	for (r = rank_bgn; r < rank_end; r++) {
1186 		/* Set rank */
1187 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1188 
1189 		tmp_bit_chk = 0;
1190 		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;
1191 		     vg >= 0; vg--) {
1192 			/* Reset the FIFOs to get pointers to known state. */
1193 			writel(0, &phy_mgr_cmd->fifo_reset);
1194 
1195 			rw_mgr_mem_calibrate_write_test_issue(
1196 				write_group *
1197 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
1198 				use_dm);
1199 
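			/*
			 * The RW manager status word appears to flag failing
			 * bits; the inversion below turns it into a per-bit
			 * pass mask accumulated across virtual groups.
			 */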
1200 			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1201 			tmp_bit_chk <<= shift_ratio;
1202 			tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
1203 		}
1204 
1205 		*bit_chk &= tmp_bit_chk;
1206 	}
1207 
1208 	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1209 	if (all_correct) {
1210 		debug_cond(DLEVEL == 2,
1211 			   "write_test(%u,%u,ALL) : %u == %u => %i\n",
1212 			   write_group, use_dm, *bit_chk,
1213 			   param->write_correct_mask,
1214 			   *bit_chk == param->write_correct_mask);
1215 		return *bit_chk == param->write_correct_mask;
1216 	} else {
1218 		debug_cond(DLEVEL == 2,
1219 			   "write_test(%u,%u,ONE) : %u != %i => %i\n",
1220 			   write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
1221 		return *bit_chk != 0x00;
1222 	}
1223 }
1224 
1225 /**
1226  * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
1227  * @rank_bgn:	Rank number
1228  * @group:	Read/Write Group
1229  * @all_ranks:	Test all ranks
1230  *
1231  * Performs a guaranteed read on the patterns we are going to use during a
1232  * read test to ensure memory works.
1233  */
1234 static int
1235 rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
1236 					const u32 all_ranks)
1237 {
1238 	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1239 			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1240 	const u32 addr_offset =
1241 			 (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
1242 	const u32 rank_end = all_ranks ?
1243 				RW_MGR_MEM_NUMBER_OF_RANKS :
1244 				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1245 	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
1246 				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
1247 	const u32 correct_mask_vg = param->read_correct_mask_vg;
1248 
1249 	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
1250 	int vg, r;
1251 	int ret = 0;
1252 
1253 	bit_chk = param->read_correct_mask;
1254 
1255 	for (r = rank_bgn; r < rank_end; r++) {
1256 		/* Set rank */
1257 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1258 
1259 		/* Load up constant bursts of read commands. */
1260 		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
1261 		writel(RW_MGR_GUARANTEED_READ,
1262 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
1263 
1264 		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
1265 		writel(RW_MGR_GUARANTEED_READ_CONT,
1266 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
1267 
1268 		tmp_bit_chk = 0;
1269 		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
1270 		     vg >= 0; vg--) {
1271 			/* Reset the FIFOs to get pointers to known state. */
1272 			writel(0, &phy_mgr_cmd->fifo_reset);
1273 			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1274 				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
1275 			writel(RW_MGR_GUARANTEED_READ,
1276 			       addr + addr_offset + (vg << 2));
1277 
1278 			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1279 			tmp_bit_chk <<= shift_ratio;
1280 			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
1281 		}
1282 
1283 		bit_chk &= tmp_bit_chk;
1284 	}
1285 
1286 	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
1287 
1288 	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1289 
1290 	if (bit_chk != param->read_correct_mask)
1291 		ret = -EIO;
1292 
1293 	debug_cond(DLEVEL == 1,
1294 		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
1295 		   __func__, __LINE__, group, bit_chk,
1296 		   param->read_correct_mask, ret);
1297 
1298 	return ret;
1299 }
1300 
1301 /**
1302  * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
1303  * @rank_bgn:	Rank number
1304  * @all_ranks:	Test all ranks
1305  *
1306  * Load up the patterns we are going to use during a read test.
1307  */
1308 static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
1309 						    const int all_ranks)
1310 {
1311 	const u32 rank_end = all_ranks ?
1312 			RW_MGR_MEM_NUMBER_OF_RANKS :
1313 			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1314 	u32 r;
1315 
1316 	debug("%s:%d\n", __func__, __LINE__);
1317 
1318 	for (r = rank_bgn; r < rank_end; r++) {
1319 		/* set rank */
1320 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1321 
1322 		/* Load up constant bursts. */
1323 		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
1324 
1325 		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
1326 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
1327 
1328 		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
1329 
1330 		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
1331 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
1332 
1333 		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
1334 
1335 		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
1336 			&sdr_rw_load_jump_mgr_regs->load_jump_add2);
1337 
1338 		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
1339 
1340 		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
1341 			&sdr_rw_load_jump_mgr_regs->load_jump_add3);
1342 
1343 		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1344 						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
1345 	}
1346 
1347 	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1348 }
1349 
1350 /**
1351  * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1352  * @rank_bgn:		Rank number
1353  * @group:		Read/Write group
1354  * @num_tries:		Number of retries of the test
1355  * @all_correct:	All bits must be correct in the mask
1356  * @bit_chk:		Resulting bit mask after the test
1357  * @all_groups:		Test all R/W groups
1358  * @all_ranks:		Test all ranks
1359  *
1360  * Try a read and see if it returns correct data back. The test has dummy
1361  * reads inserted into the mix that are used to align DQS enable, and has
1362  * more thorough checks than the regular read test.
1363  */
1364 static int
1365 rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
1366 			       const u32 num_tries, const u32 all_correct,
1367 			       u32 *bit_chk,
1368 			       const u32 all_groups, const u32 all_ranks)
1369 {
1370 	const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
1371 		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1372 	const u32 quick_read_mode =
1373 		((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
1374 		 ENABLE_SUPER_QUICK_CALIBRATION);
1375 	u32 correct_mask_vg = param->read_correct_mask_vg;
1376 	u32 tmp_bit_chk;
1377 	u32 base_rw_mgr;
1378 	u32 addr;
1379 
1380 	int r, vg, ret;
1381 
1382 	*bit_chk = param->read_correct_mask;
1383 
1384 	for (r = rank_bgn; r < rank_end; r++) {
1385 		/* set rank */
1386 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1387 
1388 		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
1389 
1390 		writel(RW_MGR_READ_B2B_WAIT1,
1391 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
1392 
1393 		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
1394 		writel(RW_MGR_READ_B2B_WAIT2,
1395 			&sdr_rw_load_jump_mgr_regs->load_jump_add2);
1396 
1397 		if (quick_read_mode)
1398 			/* Need at least two (1+1) reads to capture failures. */
1399 			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
1400 		else if (all_groups)
1401 			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
1402 		else
1403 			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
1404 
1405 		writel(RW_MGR_READ_B2B,
1406 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
1407 		if (all_groups)
1408 			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
1409 			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
1410 			       &sdr_rw_load_mgr_regs->load_cntr3);
1411 		else
1412 			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
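		/*
		 * In all_groups mode, counter 3 lets the READ_B2B sequence
		 * step through every virtual read group; otherwise it runs
		 * for the single group only.
		 */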
1413 
1414 		writel(RW_MGR_READ_B2B,
1415 			&sdr_rw_load_jump_mgr_regs->load_jump_add3);
1416 
1417 		tmp_bit_chk = 0;
1418 		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
1419 		     vg--) {
1420 			/* Reset the FIFOs to get pointers to known state. */
1421 			writel(0, &phy_mgr_cmd->fifo_reset);
1422 			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1423 				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
1424 
1425 			if (all_groups) {
1426 				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1427 				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
1428 			} else {
1429 				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1430 				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1431 			}
1432 
1433 			writel(RW_MGR_READ_B2B, addr +
1434 			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
1435 			       vg) << 2));
1436 
1437 			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1438 			tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
1439 					RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
1440 			tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
1441 		}
1442 
1443 		*bit_chk &= tmp_bit_chk;
1444 	}
1445 
1446 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1447 	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
1448 
1449 	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1450 
1451 	if (all_correct) {
1452 		ret = (*bit_chk == param->read_correct_mask);
1453 		debug_cond(DLEVEL == 2,
1454 			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
1455 			   __func__, __LINE__, group, all_groups, *bit_chk,
1456 			   param->read_correct_mask, ret);
1457 	} else	{
1458 		ret = (*bit_chk != 0x00);
1459 		debug_cond(DLEVEL == 2,
1460 			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
1461 			   __func__, __LINE__, group, all_groups, *bit_chk,
1462 			   0, ret);
1463 	}
1464 
1465 	return ret;
1466 }
1467 
1468 /**
1469  * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
1470  * @grp:		Read/Write group
1471  * @num_tries:		Number of retries of the test
1472  * @all_correct:	All bits must be correct in the mask
1473  * @all_groups:		Test all R/W groups
1474  *
1475  * Perform a READ test across all memory ranks.
1476  */
1477 static int
1478 rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
1479 					 const u32 all_correct,
1480 					 const u32 all_groups)
1481 {
1482 	u32 bit_chk;
1483 	return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
1484 					      &bit_chk, all_groups, 1);
1485 }
1486 
1487 /**
1488  * rw_mgr_incr_vfifo() - Increase VFIFO value
1489  * @grp:	Read/Write group
1490  *
1491  * Increase VFIFO value.
1492  */
1493 static void rw_mgr_incr_vfifo(const u32 grp)
1494 {
1495 	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
1496 }
1497 
1498 /**
1499  * rw_mgr_decr_vfifo() - Decrease VFIFO value
1500  * @grp:	Read/Write group
1501  *
1502  * Decrease VFIFO value.
1503  */
1504 static void rw_mgr_decr_vfifo(const u32 grp)
1505 {
1506 	u32 i;
1507 
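	/*
	 * The hardware only exposes an increment command and the VFIFO
	 * pointer wraps at VFIFO_SIZE, so stepping forward VFIFO_SIZE - 1
	 * times amounts to one step back.
	 */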
1508 	for (i = 0; i < VFIFO_SIZE - 1; i++)
1509 		rw_mgr_incr_vfifo(grp);
1510 }
1511 
1512 /**
1513  * find_vfifo_failing_read() - Push VFIFO to get a failing read
1514  * @grp:	Read/Write group
1515  *
1516  * Push VFIFO until a failing read happens.
1517  */
1518 static int find_vfifo_failing_read(const u32 grp)
1519 {
1520 	u32 v, ret, fail_cnt = 0;
1521 
1522 	for (v = 0; v < VFIFO_SIZE; v++) {
1523 		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
1524 			   __func__, __LINE__, v);
1525 		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1526 						PASS_ONE_BIT, 0);
1527 		if (!ret) {
1528 			fail_cnt++;
1529 
1530 			if (fail_cnt == 2)
1531 				return v;
1532 		}
1533 
1534 		/* Fiddle with FIFO. */
1535 		rw_mgr_incr_vfifo(grp);
1536 	}
1537 
1538 	/* No failing read found! Something must have gone wrong. */
1539 	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
1540 	return 0;
1541 }
1542 
1543 /**
1544  * sdr_find_phase_delay() - Find DQS enable phase or delay
1545  * @working:	If 1, look for working phase/delay, if 0, look for non-working
1546  * @delay:	If 1, look for delay, if 0, look for phase
1547  * @grp:	Read/Write group
1548  * @work:	Working window position
1549  * @work_inc:	Working window increment
1550  * @pd:		DQS Phase/Delay Iterator
1551  *
1552  * Find working or non-working DQS enable phase setting.
1553  */
1554 static int sdr_find_phase_delay(int working, int delay, const u32 grp,
1555 				u32 *work, const u32 work_inc, u32 *pd)
1556 {
1557 	const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
1558 	u32 ret;
1559 
1560 	for (; *pd <= max; (*pd)++) {
1561 		if (delay)
1562 			scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
1563 		else
1564 			scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);
1565 
1566 		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1567 					PASS_ONE_BIT, 0);
1568 		if (!working)
1569 			ret = !ret;
1570 
1571 		if (ret)
1572 			return 0;
1573 
1574 		if (work)
1575 			*work += work_inc;
1576 	}
1577 
1578 	return -EINVAL;
1579 }
1580 /**
1581  * sdr_find_phase() - Find DQS enable phase
1582  * @working:	If 1, look for working phase, if 0, look for non-working phase
1583  * @grp:	Read/Write group
1584  * @work:	Working window position
1585  * @i:		Iterator
1586  * @p:		DQS Phase Iterator
1587  *
1588  * Find working or non-working DQS enable phase setting.
1589  */
1590 static int sdr_find_phase(int working, const u32 grp, u32 *work,
1591 			  u32 *i, u32 *p)
1592 {
1593 	const u32 end = VFIFO_SIZE + (working ? 0 : 1);
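	/*
	 * Presumably the non-working search scans one extra VFIFO position
	 * so the failing edge just past the last working setting is still
	 * observed.
	 */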
1594 	int ret;
1595 
1596 	for (; *i < end; (*i)++) {
1597 		if (working)
1598 			*p = 0;
1599 
1600 		ret = sdr_find_phase_delay(working, 0, grp, work,
1601 					   IO_DELAY_PER_OPA_TAP, p);
1602 		if (!ret)
1603 			return 0;
1604 
1605 		if (*p > IO_DQS_EN_PHASE_MAX) {
1606 			/* Fiddle with FIFO. */
1607 			rw_mgr_incr_vfifo(grp);
1608 			if (!working)
1609 				*p = 0;
1610 		}
1611 	}
1612 
1613 	return -EINVAL;
1614 }
1615 
1616 /**
1617  * sdr_working_phase() - Find working DQS enable phase
1618  * @grp:	Read/Write group
1619  * @work_bgn:	Working window start position
1620  * @d:		dtaps output value
1621  * @p:		DQS Phase Iterator
1622  * @i:		Iterator
1623  *
1624  * Find working DQS enable phase setting.
1625  */
1626 static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
1627 			     u32 *p, u32 *i)
1628 {
1629 	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
1630 				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1631 	int ret;
1632 
1633 	*work_bgn = 0;
1634 
1635 	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
1636 		*i = 0;
1637 		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
1638 		ret = sdr_find_phase(1, grp, work_bgn, i, p);
1639 		if (!ret)
1640 			return 0;
1641 		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1642 	}
1643 
1644 	/* Cannot find working solution */
1645 	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
1646 		   __func__, __LINE__);
1647 	return -EINVAL;
1648 }
1649 
1650 /**
1651  * sdr_backup_phase() - Find DQS enable backup phase
1652  * @grp:	Read/Write group
1653  * @work_bgn:	Working window start position
1654  * @p:		DQS Phase Iterator
1655  *
1656  * Find DQS enable backup phase setting.
1657  */
1658 static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
1659 {
1660 	u32 tmp_delay, d;
1661 	int ret;
1662 
1663 	/* Special case code for backing up a phase */
1664 	if (*p == 0) {
1665 		*p = IO_DQS_EN_PHASE_MAX;
1666 		rw_mgr_decr_vfifo(grp);
1667 	} else {
1668 		(*p)--;
1669 	}
1670 	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
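	/*
	 * Having backed off one full phase tap, re-approach the start of the
	 * working window from below in dtap-sized steps.
	 */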
1671 	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
1672 
1673 	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
1674 		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1675 
1676 		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1677 					PASS_ONE_BIT, 0);
1678 		if (ret) {
1679 			*work_bgn = tmp_delay;
1680 			break;
1681 		}
1682 
1683 		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1684 	}
1685 
1686 	/* Restore VFIFO to old state before we decremented it (if needed). */
1687 	(*p)++;
1688 	if (*p > IO_DQS_EN_PHASE_MAX) {
1689 		*p = 0;
1690 		rw_mgr_incr_vfifo(grp);
1691 	}
1692 
1693 	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1694 }
1695 
1696 /**
1697  * sdr_nonworking_phase() - Find non-working DQS enable phase
1698  * @grp:	Read/Write group
1699  * @work_end:	Working window end position
1700  * @p:		DQS Phase Iterator
1701  * @i:		Iterator
1702  *
1703  * Find non-working DQS enable phase setting.
1704  */
1705 static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
1706 {
1707 	int ret;
1708 
1709 	(*p)++;
1710 	*work_end += IO_DELAY_PER_OPA_TAP;
1711 	if (*p > IO_DQS_EN_PHASE_MAX) {
1712 		/* Fiddle with FIFO. */
1713 		*p = 0;
1714 		rw_mgr_incr_vfifo(grp);
1715 	}
1716 
1717 	ret = sdr_find_phase(0, grp, work_end, i, p);
1718 	if (ret) {
1719 		/* Cannot see edge of failing read. */
1720 		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
1721 			   __func__, __LINE__);
1722 	}
1723 
1724 	return ret;
1725 }
1726 
1727 /**
1728  * sdr_find_window_center() - Find center of the working DQS window.
1729  * @grp:	Read/Write group
1730  * @work_bgn:	First working settings
1731  * @work_end:	Last working settings
1732  *
1733  * Find center of the working DQS enable window.
1734  */
1735 static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
1736 				  const u32 work_end)
1737 {
1738 	u32 work_mid;
1739 	int tmp_delay = 0;
1740 	int i, p, d;
1741 
1742 	work_mid = (work_bgn + work_end) / 2;
1743 
1744 	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
1745 		   work_bgn, work_end, work_mid);
1746 	/* Get the middle delay to be less than a VFIFO delay */
1747 	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;
1748 
1749 	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
1750 	work_mid %= tmp_delay;
1751 	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
1752 
1753 	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
1754 	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
1755 		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
1756 	p = tmp_delay / IO_DELAY_PER_OPA_TAP;
1757 
1758 	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
1759 
1760 	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
1761 	if (d > IO_DQS_EN_DELAY_MAX)
1762 		d = IO_DQS_EN_DELAY_MAX;
1763 	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1764 
1765 	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
1766 
1767 	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1768 	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1769 
1770 	/*
1771 	 * Push VFIFO until we can successfully calibrate. We can do this
1772 	 * because the largest possible margin is 1 VFIFO cycle.
1773 	 */
1774 	for (i = 0; i < VFIFO_SIZE; i++) {
1775 		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
1776 		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1777 							     PASS_ONE_BIT,
1778 							     0)) {
1779 			debug_cond(DLEVEL == 2,
1780 				   "%s:%d center: found: ptap=%u dtap=%u\n",
1781 				   __func__, __LINE__, p, d);
1782 			return 0;
1783 		}
1784 
1785 		/* Fiddle with FIFO. */
1786 		rw_mgr_incr_vfifo(grp);
1787 	}
1788 
1789 	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
1790 		   __func__, __LINE__);
1791 	return -EINVAL;
1792 }
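
/*
 * Worked example for sdr_find_window_center(), again with illustrative
 * values only: assume IO_DELAY_PER_OPA_TAP = 400 ps,
 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 50 ps and IO_DQS_EN_PHASE_MAX = 7.
 * With work_bgn = 100 and work_end = 500, work_mid = 300. One full VFIFO
 * cycle spans (7 + 1) * 400 = 3200 ps, so work_mid stays 300 after the
 * modulo. rounddown(300, 400) = 0, hence p = 0, and the remaining 300 ps
 * is covered by d = DIV_ROUND_UP(300, 50) = 6 delay-chain taps. The final
 * loop then rotates the VFIFO until a read passes with that (p, d) pair.
 */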
1793 
1794 /**
1795  * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
1796  * @grp:	Read/Write Group
1797  *
1798  * Find a good DQS enable to use.
1799  */
1800 static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
1801 {
1802 	u32 d, p, i;
1803 	u32 dtaps_per_ptap;
1804 	u32 work_bgn, work_end;
1805 	u32 found_passing_read, found_failing_read, initial_failing_dtap;
1806 	int ret;
1807 
1808 	debug("%s:%d %u\n", __func__, __LINE__, grp);
1809 
1810 	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1811 
1812 	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1813 	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1814 
1815 	/* Step 0: Determine number of delay taps for each phase tap. */
1816 	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1817 
1818 	/* Step 1: First push vfifo until we get a failing read. */
1819 	find_vfifo_failing_read(grp);
1820 
1821 	/* Step 2: Find first working phase, increment in ptaps. */
1822 	work_bgn = 0;
1823 	ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
1824 	if (ret)
1825 		return ret;
1826 
1827 	work_end = work_bgn;
1828 
1829 	/*
1830 	 * If d is 0 then the working window covers a phase tap and we can
1831 	 * follow the old procedure. Otherwise, we've found the beginning
1832 	 * and we need to increment the dtaps until we find the end.
1833 	 */
1834 	if (d == 0) {
1835 		/*
1836 		 * Step 3a: If we have room, back off by one and
1837 		 *          increment in dtaps.
1838 		 */
1839 		sdr_backup_phase(grp, &work_bgn, &p);
1840 
1841 		/*
1842 		 * Step 4a: go forward from working phase to non working
1843 		 * phase, increment in ptaps.
1844 		 */
1845 		ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
1846 		if (ret)
1847 			return ret;
1848 
1849 		/* Step 5a: Back off one from last, increment in dtaps. */
1850 
1851 		/* Special case code for backing up a phase */
1852 		if (p == 0) {
1853 			p = IO_DQS_EN_PHASE_MAX;
1854 			rw_mgr_decr_vfifo(grp);
1855 		} else {
1856 			p = p - 1;
1857 		}
1858 
1859 		work_end -= IO_DELAY_PER_OPA_TAP;
1860 		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1861 
1862 		d = 0;
1863 
1864 		debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
1865 			   __func__, __LINE__, p);
1866 	}
1867 
1868 	/* The dtap increment to find the failing edge is done here. */
1869 	sdr_find_phase_delay(0, 1, grp, &work_end,
1870 			     IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);
1871 
1872 	/* Go back to working dtap */
1873 	if (d != 0)
1874 		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1875 
1876 	debug_cond(DLEVEL == 2,
1877 		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
1878 		   __func__, __LINE__, p, d - 1, work_end);
1879 
1880 	if (work_end < work_bgn) {
1881 		/* nil range */
1882 		debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
1883 			   __func__, __LINE__);
1884 		return -EINVAL;
1885 	}
1886 
1887 	debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
1888 		   __func__, __LINE__, work_bgn, work_end);
1889 
1890 	/*
1891 	 * We need to calculate the number of dtaps that equal a ptap.
1892 	 * To do that we'll back up a ptap and re-find the edge of the
1893 	 * window using dtaps
1894 	 */
1895 	debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
1896 		   __func__, __LINE__);
1897 
1898 	/* Special case code for backing up a phase */
1899 	if (p == 0) {
1900 		p = IO_DQS_EN_PHASE_MAX;
1901 		rw_mgr_decr_vfifo(grp);
1902 		debug_cond(DLEVEL == 2, "%s:%d backed up cycle/phase: p=%u\n",
1903 			   __func__, __LINE__, p);
1904 	} else {
1905 		p = p - 1;
1906 		debug_cond(DLEVEL == 2, "%s:%d backed up phase only: p=%u\n",
1907 			   __func__, __LINE__, p);
1908 	}
1909 
1910 	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1911 
1912 	/*
1913 	 * Increase dtap until we first see a passing read (in case the
1914 	 * window is smaller than a ptap), and then a failing read to
1915 	 * mark the edge of the window again.
1916 	 */
1917 
1918 	/* Find a passing read. */
1919 	debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
1920 		   __func__, __LINE__);
1921 
1922 	initial_failing_dtap = d;
1923 
1924 	found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
1925 	if (found_passing_read) {
1926 		/* Find a failing read. */
1927 		debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
1928 			   __func__, __LINE__);
1929 		d++;
1930 		found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
1931 							   &d);
1932 	} else {
1933 		debug_cond(DLEVEL == 1,
1934 			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
1935 			   __func__, __LINE__);
1936 	}
1937 
1938 	/*
1939 	 * The dynamically calculated dtaps_per_ptap is only valid if we
1940 	 * found both a passing and a failing read. If we didn't, it means
1941 	 * d hit the max (IO_DQS_EN_DELAY_MAX) and dtaps_per_ptap retains
1942 	 * its statically calculated value.
1943 	 */
1944 	if (found_passing_read && found_failing_read)
1945 		dtaps_per_ptap = d - initial_failing_dtap;
1946 
1947 	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
1948 	debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u\n",
1949 		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
1950 
1951 	/* Step 6: Find the centre of the window. */
1952 	ret = sdr_find_window_center(grp, work_bgn, work_end);
1953 
1954 	return ret;
1955 }
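
/*
 * Illustration of the dynamic dtaps_per_ptap measurement above (numbers
 * are hypothetical): suppose the failing edge was found at dtap d = 2
 * (initial_failing_dtap), the phase is then backed up by one ptap, a
 * passing read appears at d = 5 and the next failing read at d = 10.
 * The window edge has moved by exactly one ptap, so
 * dtaps_per_ptap = 10 - 2 = 8 dtaps, which is what gets written to the
 * register file for the tracking manager.
 */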
1956 
1957 /**
1958  * search_stop_check() - Check if the detected edge is valid
1959  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
1960  * @d:			DQS delay
1961  * @rank_bgn:		Rank number
1962  * @write_group:	Write Group
1963  * @read_group:		Read Group
1964  * @bit_chk:		Resulting bit mask after the test
1965  * @sticky_bit_chk:	Resulting sticky bit mask after the test
1966  * @use_read_test:	Perform read test
1967  *
1968  * Test if the found edge is valid.
1969  */
1970 static u32 search_stop_check(const int write, const int d, const int rank_bgn,
1971 			     const u32 write_group, const u32 read_group,
1972 			     u32 *bit_chk, u32 *sticky_bit_chk,
1973 			     const u32 use_read_test)
1974 {
1975 	const u32 ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
1976 			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
1977 	const u32 correct_mask = write ? param->write_correct_mask :
1978 					 param->read_correct_mask;
1979 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
1980 				    RW_MGR_MEM_DQ_PER_READ_DQS;
1981 	u32 ret;
1982 	/*
1983 	 * Stop searching when the read test doesn't pass AND when
1984 	 * we've seen a passing read on every bit.
1985 	 */
1986 	if (write) {			/* WRITE-ONLY */
1987 		ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1988 							 0, PASS_ONE_BIT,
1989 							 bit_chk, 0);
1990 	} else if (use_read_test) {	/* READ-ONLY */
1991 		ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
1992 							NUM_READ_PB_TESTS,
1993 							PASS_ONE_BIT, bit_chk,
1994 							0, 0);
1995 	} else {			/* READ-ONLY */
1996 		rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
1997 						PASS_ONE_BIT, bit_chk, 0);
1998 		*bit_chk = *bit_chk >> (per_dqs *
1999 			(read_group - (write_group * ratio)));
2000 		ret = (*bit_chk == 0);
2001 	}
2002 	*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
2003 	ret = ret && (*sticky_bit_chk == correct_mask);
2004 	debug_cond(DLEVEL == 2,
2005 		   "%s:%d center(left): dtap=%u => %u == %u && %u\n",
2006 		   __func__, __LINE__, d,
2007 		   *sticky_bit_chk, correct_mask, ret);
2008 	return ret;
2009 }
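
/*
 * Example of the read-group bit extraction in the last branch above
 * (hypothetical widths): with RW_MGR_MEM_IF_READ_DQS_WIDTH = 8 and
 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 2, ratio = 4, i.e. four read groups
 * share one write group. For read_group = 5, write_group = 1 and
 * per_dqs = 8, the write-test result is shifted right by
 * 8 * (5 - 1 * 4) = 8 bits so that *bit_chk holds only the lanes of
 * read group 5, and ret is 1 only when all of them failed.
 */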
2010 
2011 /**
2012  * search_left_edge() - Find left edge of DQ/DQS working phase
2013  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
2014  * @rank_bgn:		Rank number
2015  * @write_group:	Write Group
2016  * @read_group:		Read Group
2017  * @test_bgn:		First DQ/DQS pin of the group to test
2018  * @sticky_bit_chk:	Resulting sticky bit mask after the test
2019  * @left_edge:		Left edge of the DQ/DQS phase
2020  * @right_edge:		Right edge of the DQ/DQS phase
2021  * @use_read_test:	Perform read test
2022  *
2023  * Find left edge of DQ/DQS working phase.
2024  */
2025 static void search_left_edge(const int write, const int rank_bgn,
2026 	const u32 write_group, const u32 read_group, const u32 test_bgn,
2027 	u32 *sticky_bit_chk,
2028 	int *left_edge, int *right_edge, const u32 use_read_test)
2029 {
2030 	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
2031 	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
2032 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2033 				    RW_MGR_MEM_DQ_PER_READ_DQS;
2034 	u32 stop, bit_chk;
2035 	int i, d;
2036 
2037 	for (d = 0; d <= dqs_max; d++) {
2038 		if (write)
2039 			scc_mgr_apply_group_dq_out1_delay(d);
2040 		else
2041 			scc_mgr_apply_group_dq_in_delay(test_bgn, d);
2042 
2043 		writel(0, &sdr_scc_mgr->update);
2044 
2045 		stop = search_stop_check(write, d, rank_bgn, write_group,
2046 					 read_group, &bit_chk, sticky_bit_chk,
2047 					 use_read_test);
2048 		if (stop == 1)
2049 			break;
2050 
2051 		/* stop != 1 */
2052 		for (i = 0; i < per_dqs; i++) {
2053 			if (bit_chk & 1) {
2054 				/*
2055 				 * Remember a passing test as
2056 				 * the left_edge.
2057 				 */
2058 				left_edge[i] = d;
2059 			} else {
2060 				/*
2061 				 * If a left edge has not been seen
2062 				 * yet, then a future passing test
2063 				 * will mark this edge as the right
2064 				 * edge.
2065 				 */
2066 				if (left_edge[i] == delay_max + 1)
2067 					right_edge[i] = -(d + 1);
2068 			}
2069 			bit_chk >>= 1;
2070 		}
2071 	}
2072 
2073 	/* Reset DQ delay chains to 0 */
2074 	if (write)
2075 		scc_mgr_apply_group_dq_out1_delay(0);
2076 	else
2077 		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2078 
2079 	*sticky_bit_chk = 0;
2080 	for (i = per_dqs - 1; i >= 0; i--) {
2081 		debug_cond(DLEVEL == 2,
2082 			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2083 			   __func__, __LINE__, i, left_edge[i],
2084 			   i, right_edge[i]);
2085 
2086 		/*
2087 		 * Check for cases where we haven't found the left edge,
2088 		 * which makes our assignment of the right edge invalid.
2089 		 * Reset it to the illegal value.
2090 		 */
2091 		if ((left_edge[i] == delay_max + 1) &&
2092 		    (right_edge[i] != delay_max + 1)) {
2093 			right_edge[i] = delay_max + 1;
2094 			debug_cond(DLEVEL == 2,
2095 				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
2096 				   __func__, __LINE__, i, right_edge[i]);
2097 		}
2098 
2099 		/*
2100 		 * Reset sticky bit
2101 		 * READ: except for bits where we have seen both
2102 		 *       the left and right edge.
2103 		 * WRITE: except for bits where we have seen the
2104 		 *        left edge.
2105 		 */
2106 		*sticky_bit_chk <<= 1;
2107 		if (write) {
2108 			if (left_edge[i] != delay_max + 1)
2109 				*sticky_bit_chk |= 1;
2110 		} else {
2111 			if ((left_edge[i] != delay_max + 1) &&
2112 			    (right_edge[i] != delay_max + 1))
2113 				*sticky_bit_chk |= 1;
2114 		}
2115 	}
2116 
2117 
2118 }
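
/*
 * Sticky-mask rebuild example for search_left_edge() (hypothetical,
 * per_dqs = 4): if left edges were found for bits 0, 1 and 3 but not
 * bit 2, the write-calibration case rebuilds *sticky_bit_chk as binary
 * 1011 (bit 2 stays clear). A later search_stop_check() can therefore
 * only report "all bits seen" once bit 2 also shows a passing read.
 */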
2119 
2120 /**
2121  * search_right_edge() - Find right edge of DQ/DQS working phase
2122  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
2123  * @rank_bgn:		Rank number
2124  * @write_group:	Write Group
2125  * @read_group:		Read Group
2126  * @start_dqs:		DQS start phase
2127  * @start_dqs_en:	DQS enable start phase
2128  * @sticky_bit_chk:	Resulting sticky bit mask after the test
2129  * @left_edge:		Left edge of the DQ/DQS phase
2130  * @right_edge:		Right edge of the DQ/DQS phase
2131  * @use_read_test:	Perform read test
2132  *
2133  * Find right edge of DQ/DQS working phase.
2134  */
2135 static int search_right_edge(const int write, const int rank_bgn,
2136 	const u32 write_group, const u32 read_group,
2137 	const int start_dqs, const int start_dqs_en,
2138 	u32 *sticky_bit_chk,
2139 	int *left_edge, int *right_edge, const u32 use_read_test)
2140 {
2141 	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
2142 	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
2143 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2144 				    RW_MGR_MEM_DQ_PER_READ_DQS;
2145 	u32 stop, bit_chk;
2146 	int i, d;
2147 
2148 	for (d = 0; d <= dqs_max - start_dqs; d++) {
2149 		if (write) {	/* WRITE-ONLY */
2150 			scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2151 								d + start_dqs);
2152 		} else {	/* READ-ONLY */
2153 			scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
2154 			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2155 				uint32_t delay = d + start_dqs_en;
2156 				if (delay > IO_DQS_EN_DELAY_MAX)
2157 					delay = IO_DQS_EN_DELAY_MAX;
2158 				scc_mgr_set_dqs_en_delay(read_group, delay);
2159 			}
2160 			scc_mgr_load_dqs(read_group);
2161 		}
2162 
2163 		writel(0, &sdr_scc_mgr->update);
2164 
2165 		stop = search_stop_check(write, d, rank_bgn, write_group,
2166 					 read_group, &bit_chk, sticky_bit_chk,
2167 					 use_read_test);
2168 		if (stop == 1) {
2169 			if (write && (d == 0)) {	/* WRITE-ONLY */
2170 				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2171 					/*
2172 					 * d = 0 failed, but it passed when
2173 					 * testing the left edge, so it must be
2174 					 * marginal, set it to -1
2175 					 */
2176 					if (right_edge[i] == delay_max + 1 &&
2177 					    left_edge[i] != delay_max + 1)
2178 						right_edge[i] = -1;
2179 				}
2180 			}
2181 			break;
2182 		}
2183 
2184 		/* stop != 1 */
2185 		for (i = 0; i < per_dqs; i++) {
2186 			if (bit_chk & 1) {
2187 				/*
2188 				 * Remember a passing test as
2189 				 * the right_edge.
2190 				 */
2191 				right_edge[i] = d;
2192 			} else {
2193 				if (d != 0) {
2194 					/*
2195 					 * If a right edge has not
2196 					 * been seen yet, then a future
2197 					 * passing test will mark this
2198 					 * edge as the left edge.
2199 					 */
2200 					if (right_edge[i] == delay_max + 1)
2201 						left_edge[i] = -(d + 1);
2202 				} else {
2203 					/*
2204 					 * d = 0 failed, but it passed
2205 					 * when testing the left edge,
2206 					 * so it must be marginal, set
2207 					 * it to -1
2208 					 */
2209 					if (right_edge[i] == delay_max + 1 &&
2210 					    left_edge[i] != delay_max + 1)
2211 						right_edge[i] = -1;
2212 					/*
2213 					 * If a right edge has not been
2214 					 * seen yet, then a future
2215 					 * passing test will mark this
2216 					 * edge as the left edge.
2217 					 */
2218 					else if (right_edge[i] == delay_max + 1)
2219 						left_edge[i] = -(d + 1);
2220 				}
2221 			}
2222 
2223 			debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ",
2224 				   __func__, __LINE__, d);
2225 			debug_cond(DLEVEL == 2,
2226 				   "bit_chk_test=%i left_edge[%u]: %d ",
2227 				   bit_chk & 1, i, left_edge[i]);
2228 			debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2229 				   right_edge[i]);
2230 			bit_chk >>= 1;
2231 		}
2232 	}
2233 
2234 	/* Check that all bits have a window */
2235 	for (i = 0; i < per_dqs; i++) {
2236 		debug_cond(DLEVEL == 2,
2237 			   "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2238 			   __func__, __LINE__, i, left_edge[i],
2239 			   i, right_edge[i]);
2240 		if ((left_edge[i] == delay_max + 1) ||
2241 		    (right_edge[i] == delay_max + 1))
2242 			return i + 1;	/* FIXME: If we fail, retval > 0 */
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 /**
2249  * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2250  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
2251  * @left_edge:		Left edge of the DQ/DQS phase
2252  * @right_edge:		Right edge of the DQ/DQS phase
2253  * @mid_min:		Best DQ/DQS phase middle setting
2254  *
2255  * Find index and value of the middle of the DQ/DQS working phase.
2256  */
2257 static int get_window_mid_index(const int write, int *left_edge,
2258 				int *right_edge, int *mid_min)
2259 {
2260 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2261 				    RW_MGR_MEM_DQ_PER_READ_DQS;
2262 	int i, mid, min_index;
2263 
2264 	/* Find middle of window for each DQ bit */
2265 	*mid_min = left_edge[0] - right_edge[0];
2266 	min_index = 0;
2267 	for (i = 1; i < per_dqs; i++) {
2268 		mid = left_edge[i] - right_edge[i];
2269 		if (mid < *mid_min) {
2270 			*mid_min = mid;
2271 			min_index = i;
2272 		}
2273 	}
2274 
2275 	/*
2276 	 * -mid_min/2 represents the amount that we need to move DQS.
2277 	 * If mid_min is odd and positive we'll need to add one to make
2278 	 * sure the rounding in further calculations is correct (always
2279 	 * bias to the right), so just add 1 for all positive values.
2280 	 */
2281 	if (*mid_min > 0)
2282 		(*mid_min)++;
2283 	*mid_min = *mid_min / 2;
2284 
2285 	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
2286 		   __func__, __LINE__, *mid_min, min_index);
2287 	return min_index;
2288 }
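
/*
 * Worked example for get_window_mid_index() (hypothetical edges): with
 * left_edge = {10, 12} and right_edge = {8, 6}, the per-bit skews are
 * 10 - 8 = 2 and 12 - 6 = 6, so mid_min = 2 at min_index = 0. Since
 * mid_min is positive it is bumped to 3 before halving, giving
 * *mid_min = 1; the caller will then move DQS by one tap, matching the
 * right-bias described in the comment above.
 */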
2289 
2290 /**
2291  * center_dq_windows() - Center the DQ/DQS windows
2292  * @write:		Perform read (Stage 2) or write (Stage 3) calibration
2293  * @left_edge:		Left edge of the DQ/DQS phase
2294  * @right_edge:		Right edge of the DQ/DQS phase
2295  * @mid_min:		Adjusted DQ/DQS phase middle setting
2296  * @orig_mid_min:	Original DQ/DQS phase middle setting
2297  * @min_index:		DQ/DQS phase middle setting index
2298  * @test_bgn:		Rank number to begin the test
2299  * @dq_margin:		Amount of shift for the DQ
2300  * @dqs_margin:		Amount of shift for the DQS
2301  *
2302  * Align the DQ/DQS windows in each group.
2303  */
2304 static void center_dq_windows(const int write, int *left_edge, int *right_edge,
2305 			      const int mid_min, const int orig_mid_min,
2306 			      const int min_index, const int test_bgn,
2307 			      int *dq_margin, int *dqs_margin)
2308 {
2309 	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
2310 	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
2311 				    RW_MGR_MEM_DQ_PER_READ_DQS;
2312 	const u32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
2313 				      SCC_MGR_IO_IN_DELAY_OFFSET;
2314 	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
2315 
2316 	int temp_dq_io_delay;	/* Signed: shift_dq can be negative. */
2317 	int shift_dq, i, p;
2318 
2319 	/* Initialize data for export structures */
2320 	*dqs_margin = delay_max + 1;
2321 	*dq_margin  = delay_max + 1;
2322 
2323 	/* add delay to bring centre of all DQ windows to the same "level" */
2324 	for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2325 		/* Use values before divide by 2 to reduce round off error */
2326 		shift_dq = (left_edge[i] - right_edge[i] -
2327 			(left_edge[min_index] - right_edge[min_index]))/2  +
2328 			(orig_mid_min - mid_min);
2329 
2330 		debug_cond(DLEVEL == 2,
2331 			   "vfifo_center: before: shift_dq[%u]=%d\n",
2332 			   i, shift_dq);
2333 
2334 		temp_dq_io_delay = readl(addr + (p << 2));
2335 
2336 		/* Keep the adjusted delay within [0, delay_max]. */
2337 		if (shift_dq + temp_dq_io_delay < 0)
2338 			shift_dq = -temp_dq_io_delay;
2339 		else if (shift_dq + temp_dq_io_delay > delay_max)
2340 			shift_dq = delay_max - temp_dq_io_delay;
2341 
2342 		debug_cond(DLEVEL == 2,
2343 			   "vfifo_center: after: shift_dq[%u]=%d\n",
2344 			   i, shift_dq);
2345 
2346 		if (write)
2347 			scc_mgr_set_dq_out1_delay(i, temp_dq_io_delay + shift_dq);
2348 		else
2349 			scc_mgr_set_dq_in_delay(p, temp_dq_io_delay + shift_dq);
2350 
2351 		scc_mgr_load_dq(p);
2352 
2353 		debug_cond(DLEVEL == 2,
2354 			   "vfifo_center: margin[%u]=[%d,%d]\n", i,
2355 			   left_edge[i] - shift_dq + (-mid_min),
2356 			   right_edge[i] + shift_dq - (-mid_min));
2357 
2358 		/* To determine values for export structures */
2359 		if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2360 			*dq_margin = left_edge[i] - shift_dq + (-mid_min);
2361 
2362 		if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2363 			*dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2364 	}
2365 
2366 }
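
/*
 * Worked example of the shift_dq computation above (hypothetical edges,
 * orig_mid_min == mid_min for simplicity): for a bit with left_edge = 10
 * and right_edge = 4, where the reference bit at min_index has
 * left_edge = 6 and right_edge = 4,
 *   shift_dq = ((10 - 4) - (6 - 4)) / 2 = 2,
 * i.e. this bit's window centre sits two taps away from the reference,
 * so two extra taps of DQ delay line the centres up. If the bit's
 * current delay were already delay_max - 1, the clamp would cut
 * shift_dq down so the programmed delay never exceeds delay_max.
 */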
2367 
2368 /**
2369  * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
2370  * @rank_bgn:		Rank number
2371  * @rw_group:		Read/Write Group
2372  * @test_bgn:		Rank at which the test begins
2373  * @use_read_test:	Perform a read test
2374  * @update_fom:		Update FOM
2375  *
2376  * Per-bit deskew DQ and centering.
2377  */
2378 static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
2379 			const u32 rw_group, const u32 test_bgn,
2380 			const int use_read_test, const int update_fom)
2381 {
2382 	const u32 addr =
2383 		SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
2384 		(rw_group << 2);
2385 	/*
2386 	 * Store these as signed since there are comparisons with
2387 	 * signed numbers.
2388 	 */
2389 	uint32_t sticky_bit_chk;
2390 	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
2391 	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
2392 	int32_t orig_mid_min, mid_min;
2393 	int32_t new_dqs, start_dqs, start_dqs_en, final_dqs_en;
2394 	int32_t dq_margin, dqs_margin;
2395 	int i, min_index;
2396 	int ret;
2397 
2398 	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2399 
2400 	start_dqs = readl(addr);
2401 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2402 		start_dqs_en = readl(addr - IO_DQS_EN_DELAY_OFFSET);
2403 
2404 	/* set the left and right edge of each bit to an illegal value */
2405 	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
2406 	sticky_bit_chk = 0;
2407 	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2408 		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
2409 		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
2410 	}
2411 
2412 	/* Search for the left edge of the window for each bit */
2413 	search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
2414 			 &sticky_bit_chk,
2415 			 left_edge, right_edge, use_read_test);
2416 
2417 
2418 	/* Search for the right edge of the window for each bit */
2419 	ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
2420 				start_dqs, start_dqs_en,
2421 				&sticky_bit_chk,
2422 				left_edge, right_edge, use_read_test);
2423 	if (ret) {
2424 		i = ret - 1;	/* search_right_edge() returns failing bit + 1. */
2425 		/*
2426 		 * Restore delay chain settings so the loop in
2427 		 * rw_mgr_mem_calibrate_vfifo() can retry other DQS/CK phases.
2428 		 */
2429 		scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
2430 		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2431 			scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
2432 
2433 		scc_mgr_load_dqs(rw_group);
2434 		writel(0, &sdr_scc_mgr->update);
2435 
2436 		debug_cond(DLEVEL == 1,
2437 			   "%s:%d vfifo_center: failed to find edge [%u]: %d %d\n",
2438 			   __func__, __LINE__, i, left_edge[i], right_edge[i]);
2439 		if (use_read_test) {
2440 			set_failing_group_stage(rw_group *
2441 				RW_MGR_MEM_DQ_PER_READ_DQS + i,
2442 				CAL_STAGE_VFIFO,
2443 				CAL_SUBSTAGE_VFIFO_CENTER);
2444 		} else {
2445 			set_failing_group_stage(rw_group *
2446 				RW_MGR_MEM_DQ_PER_READ_DQS + i,
2447 				CAL_STAGE_VFIFO_AFTER_WRITES,
2448 				CAL_SUBSTAGE_VFIFO_CENTER);
2449 		}
2450 		return -EIO;
2451 	}
2452 
2453 	min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);
2454 
2455 	/* Determine the amount we can change DQS (which is -mid_min) */
2456 	orig_mid_min = mid_min;
2457 	new_dqs = start_dqs - mid_min;
2458 	if (new_dqs > IO_DQS_IN_DELAY_MAX)
2459 		new_dqs = IO_DQS_IN_DELAY_MAX;
2460 	else if (new_dqs < 0)
2461 		new_dqs = 0;
2462 
2463 	mid_min = start_dqs - new_dqs;
2464 	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2465 		   mid_min, new_dqs);
2466 
2467 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2468 		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2469 			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2470 		else if (start_dqs_en - mid_min < 0)
2471 			mid_min += start_dqs_en - mid_min;
2472 	}
2473 	new_dqs = start_dqs - mid_min;
2474 
2475 	debug_cond(DLEVEL == 1,
2476 		   "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2477 		   start_dqs,
2478 		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2479 		   new_dqs, mid_min);
2480 
2481 	/* Add delay to bring centre of all DQ windows to the same "level". */
2482 	center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
2483 			  min_index, test_bgn, &dq_margin, &dqs_margin);
2484 
2485 	/* Move DQS-en */
2486 	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2487 		final_dqs_en = start_dqs_en - mid_min;
2488 		scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
2489 		scc_mgr_load_dqs(rw_group);
2490 	}
2491 
2492 	/* Move DQS */
2493 	scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
2494 	scc_mgr_load_dqs(rw_group);
2495 	debug_cond(DLEVEL == 2,
2496 		   "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d\n",
2497 		   __func__, __LINE__, dq_margin, dqs_margin);
2498 
2499 	/*
2500 	 * Do not remove this line as it makes sure all of our decisions
2501 	 * have been applied. Apply the update bit.
2502 	 */
2503 	writel(0, &sdr_scc_mgr->update);
2504 
2505 	if ((dq_margin < 0) || (dqs_margin < 0))
2506 		return -EINVAL;
2507 
2508 	return 0;
2509 }
2510 
2511 /**
2512  * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
2513  * @rw_group:	Read/Write Group
2514  * @phase:	DQ/DQS phase
2515  *
2516  * Because initially no communication can be reliably performed with the memory
2517  * device, the sequencer uses a guaranteed write mechanism to write data into
2518  * the memory device.
2519  */
2520 static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
2521 						 const u32 phase)
2522 {
2523 	int ret;
2524 
2525 	/* Set a particular DQ/DQS phase. */
2526 	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
2527 
2528 	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
2529 		   __func__, __LINE__, rw_group, phase);
2530 
2531 	/*
2532 	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2533 	 * Load up the patterns used by read calibration using the
2534 	 * current DQDQS phase.
2535 	 */
2536 	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2537 
2538 	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
2539 		return 0;
2540 
2541 	/*
2542 	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2543 	 * Back-to-Back reads of the patterns used for calibration.
2544 	 */
2545 	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
2546 	if (ret)
2547 		debug_cond(DLEVEL == 1,
2548 			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2549 			   __func__, __LINE__, rw_group, phase);
2550 	return ret;
2551 }
2552 
2553 /**
2554  * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2555  * @rw_group:	Read/Write Group
2556  * @test_bgn:	Rank at which the test begins
2557  *
2558  * DQS enable calibration ensures reliable capture of the DQ signal without
2559  * glitches on the DQS line.
2560  */
2561 static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
2562 						       const u32 test_bgn)
2563 {
2564 	/*
2565 	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2566 	 * DQS and DQS Enable Signal Relationships.
2567 	 */
2568 
2569 	/* We start at zero, so there is one less DQ to divide among. */
2570 	const u32 delay_step = IO_IO_IN_DELAY_MAX /
2571 			       (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
2572 	int ret;
2573 	u32 i, p, d, r;
2574 
2575 	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2576 
2577 	/* Try different dq_in_delays since the DQ path is shorter than DQS. */
2578 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2579 	     r += NUM_RANKS_PER_SHADOW_REG) {
2580 		for (i = 0, p = test_bgn, d = 0;
2581 		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
2582 		     i++, p++, d += delay_step) {
2583 			debug_cond(DLEVEL == 1,
2584 				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2585 				   __func__, __LINE__, rw_group, r, i, p, d);
2586 
2587 			scc_mgr_set_dq_in_delay(p, d);
2588 			scc_mgr_load_dq(p);
2589 		}
2590 
2591 		writel(0, &sdr_scc_mgr->update);
2592 	}
2593 
2594 	/*
2595 	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2596 	 * dq_in_delay values
2597 	 */
2598 	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
2599 
2600 	debug_cond(DLEVEL == 1,
2601 		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
2602 		   __func__, __LINE__, rw_group, !ret);
2603 
2604 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
2605 	     r += NUM_RANKS_PER_SHADOW_REG) {
2606 		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2607 		writel(0, &sdr_scc_mgr->update);
2608 	}
2609 
2610 	return ret;
2611 }
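
/*
 * Example of the delay_step spread used above (hypothetical limits):
 * with IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8,
 * delay_step = 31 / 7 = 4, so the eight DQ pins of the group get input
 * delays 0, 4, 8, ..., 28. Staggering the pins this way means at least
 * one DQ bit keeps a usable margin for whichever DQS enable phase the
 * search lands on, which is why the delays are only zeroed afterwards.
 */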
2612 
2613 /**
2614  * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2615  * @rw_group:		Read/Write Group
2616  * @test_bgn:		Rank at which the test begins
2617  * @use_read_test:	Perform a read test
2618  * @update_fom:		Update FOM
2619  *
2620  * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
2621  * within a group.
2622  */
2623 static int
2624 rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
2625 				      const int use_read_test,
2626 				      const int update_fom)
2627 
2628 {
2629 	int ret, grp_calibrated;
2630 	u32 rank_bgn, sr;
2631 
2632 	/*
2633 	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2634 	 * Read per-bit deskew can be done on a per shadow register basis.
2635 	 */
2636 	grp_calibrated = 1;
2637 	for (rank_bgn = 0, sr = 0;
2638 	     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2639 	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
2640 		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
2641 							test_bgn,
2642 							use_read_test,
2643 							update_fom);
2644 		if (!ret)
2645 			continue;
2646 
2647 		grp_calibrated = 0;
2648 	}
2649 
2650 	if (!grp_calibrated)
2651 		return -EIO;
2652 
2653 	return 0;
2654 }
2655 
2656 /**
2657  * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2658  * @rw_group:		Read/Write Group
2659  * @test_bgn:		Rank at which the test begins
2660  *
2661  * Stage 1: Calibrate the read valid prediction FIFO.
2662  *
2663  * This function implements UniPHY calibration Stage 1, as explained in
2664  * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2665  *
2666  * - read valid prediction will consist of finding:
2667  *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2668  *   - DQS input phase  and DQS input delay (DQ/DQS Centering)
2669  *  - we also do a per-bit deskew on the DQ lines.
2670  */
2671 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
2672 {
2673 	uint32_t p, d;
2674 	uint32_t dtaps_per_ptap;
2675 	uint32_t failed_substage;
2676 
2677 	int ret;
2678 
2679 	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2680 
2681 	/* Update info for sims */
2682 	reg_file_set_group(rw_group);
2683 	reg_file_set_stage(CAL_STAGE_VFIFO);
2684 	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2685 
2686 	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2687 
2688 	/* Determine the number of delay taps for each phase tap. */
2689 	dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
2690 				      IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
2691 
2692 	for (d = 0; d <= dtaps_per_ptap; d += 2) {
2693 		/*
2694 		 * In RLDRAMX we may be messing with the delay of pins in
2695 		 * the same write group but outside of the current read
2696 		 * group, but that's OK because we haven't calibrated the
2697 		 * output side yet.
2698 		 */
2699 		if (d > 0) {
2700 			scc_mgr_apply_group_all_out_delay_add_all_ranks(
2701 								rw_group, d);
2702 		}
2703 
2704 		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
2705 			/* 1) Guaranteed Write */
2706 			ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
2707 			if (ret)
2708 				break;
2709 
2710 			/* 2) DQS Enable Calibration */
2711 			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
2712 									  test_bgn);
2713 			if (ret) {
2714 				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2715 				continue;
2716 			}
2717 
2718 			/* 3) Centering DQ/DQS */
2719 			/*
2720 			 * If doing read after write calibration, do not update
2721 			 * If doing read-after-write calibration, do not update
2722 			 * the FOM now; it is updated during that later pass.
2723 			ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
2724 								test_bgn, 1, 0);
2725 			if (ret) {
2726 				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
2727 				continue;
2728 			}
2729 
2730 			/* All done. */
2731 			goto cal_done_ok;
2732 		}
2733 	}
2734 
2735 	/* Calibration Stage 1 failed. */
2736 	set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
2737 	return 0;
2738 
2739 	/* Calibration Stage 1 completed OK. */
2740 cal_done_ok:
2741 	/*
2742 	 * Reset the delay chains back to zero if they have moved > 1
2743 	 * (check for d > 2 since d advances in steps of 2 and keeps its
2744 	 * loop value on the passing iteration).
2745 	 */
2746 	if (d > 2)
2747 		scc_mgr_zero_group(rw_group, 1);
2748 
2749 	return 1;
2750 }
2751 
2752 /**
2753  * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
2754  * @rw_group:		Read/Write Group
2755  * @test_bgn:		Rank at which the test begins
2756  *
2757  * Stage 3: DQ/DQS Centering.
2758  *
2759  * This function implements UniPHY calibration Stage 3, as explained in
2760  * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2761  */
2762 static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
2763 					  const u32 test_bgn)
2764 {
2765 	int ret;
2766 
2767 	debug("%s:%d %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2768 
2769 	/* Update info for sims. */
2770 	reg_file_set_group(rw_group);
2771 	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2772 	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2773 
2774 	ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
2775 	if (ret)
2776 		set_failing_group_stage(rw_group,
2777 					CAL_STAGE_VFIFO_AFTER_WRITES,
2778 					CAL_SUBSTAGE_VFIFO_CENTER);
2779 	return ret;
2780 }
2781 
2782 /**
2783  * rw_mgr_mem_calibrate_lfifo() - Minimize latency
2784  *
2785  * Stage 4: Minimize latency.
2786  *
2787  * This function implements UniPHY calibration Stage 4, as explained in
2788  * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2789  * Calibrate LFIFO to find smallest read latency.
2790  */
2791 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2792 {
2793 	int found_one = 0;
2794 
2795 	debug("%s:%d\n", __func__, __LINE__);
2796 
2797 	/* Update info for sims. */
2798 	reg_file_set_stage(CAL_STAGE_LFIFO);
2799 	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2800 
2801 	/* Load up the patterns used by read calibration for all ranks */
2802 	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2803 
2804 	do {
2805 		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2806 		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2807 			   __func__, __LINE__, gbl->curr_read_lat);
2808 
2809 		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
2810 							      PASS_ALL_BITS, 1))
2811 			break;
2812 
2813 		found_one = 1;
2814 		/*
2815 		 * Reduce read latency and see if things are
2816 		 * working correctly.
2817 		 */
2818 		gbl->curr_read_lat--;
2819 	} while (gbl->curr_read_lat > 0);
2820 
2821 	/* Reset the fifos to get pointers to known state. */
2822 	writel(0, &phy_mgr_cmd->fifo_reset);
2823 
2824 	if (found_one) {
2825 		/* Add a fudge factor to the read latency that was determined */
2826 		gbl->curr_read_lat += 2;
2827 		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2828 		debug_cond(DLEVEL == 2,
2829 			   "%s:%d lfifo: success: using read_lat=%u\n",
2830 			   __func__, __LINE__, gbl->curr_read_lat);
2831 	} else {
2832 		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2833 					CAL_SUBSTAGE_READ_LATENCY);
2834 
2835 		debug_cond(DLEVEL == 2,
2836 			   "%s:%d lfifo: failed at initial read_lat=%u\n",
2837 			   __func__, __LINE__, gbl->curr_read_lat);
2838 	}
2839 
2840 	return found_one;
2841 }
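
/*
 * LFIFO example (hypothetical latencies): if calibration starts with
 * gbl->curr_read_lat = 20 and reads keep passing down to 9, the test at
 * 8 fails and the loop breaks with curr_read_lat == 8. The +2 fudge then
 * programs a final read latency of 10: one step back to the last passing
 * value plus one extra cycle of safety margin.
 */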
2842 
2843 /**
2844  * search_window() - Search for the/part of the window with DM/DQS shift
2845  * @search_dm:		If 1, search for the DM shift, if 0, search for DQS shift
2846  * @rank_bgn:		Rank number
2847  * @write_group:	Write Group
2848  * @bgn_curr:		Current window begin
2849  * @end_curr:		Current window end
2850  * @bgn_best:		Current best window begin
2851  * @end_best:		Current best window end
2852  * @win_best:		Size of the best window
2853  * @new_dqs:		New DQS value (only applicable if search_dm = 0).
2854  *
2855  * Search for the/part of the window with DM/DQS shift.
2856  */
2857 static void search_window(const int search_dm,
2858 			  const u32 rank_bgn, const u32 write_group,
2859 			  int *bgn_curr, int *end_curr, int *bgn_best,
2860 			  int *end_best, int *win_best, int new_dqs)
2861 {
2862 	u32 bit_chk;
2863 	const int max = IO_IO_OUT1_DELAY_MAX - new_dqs;
2864 	int d, di;
2865 
2866 	/* Search for the/part of the window with DM/DQS shift. */
2867 	for (di = max; di >= 0; di -= DELTA_D) {
2868 		if (search_dm) {
2869 			d = di;
2870 			scc_mgr_apply_group_dm_out1_delay(d);
2871 		} else {
2872 			/* For DQS, we go from 0...max */
2873 			d = max - di;
2874 			/*
2875 			 * Note: This only shifts DQS, so we may be unnecessarily
2876 			 * limiting ourselves to the width of DQ.
2877 			 */
2878 			scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2879 								d + new_dqs);
2880 		}
2881 
2882 		writel(0, &sdr_scc_mgr->update);
2883 
2884 		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2885 						    PASS_ALL_BITS, &bit_chk,
2886 						    0)) {
2887 			/* Set current end of the window. */
2888 			*end_curr = search_dm ? -d : d;
2889 
2890 			/*
2891 			 * If a starting edge of our window has not been seen
2892 			 * this is our current start of the DM window.
2893 			 */
2894 			if (*bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2895 				*bgn_curr = search_dm ? -d : d;
2896 
2897 			/*
2898 			 * If current window is bigger than best seen.
2899 			 * Set best seen to be current window.
2900 			 */
2901 			if ((*end_curr - *bgn_curr + 1) > *win_best) {
2902 				*win_best = *end_curr - *bgn_curr + 1;
2903 				*bgn_best = *bgn_curr;
2904 				*end_best = *end_curr;
2905 			}
2906 		} else {
2907 			/* We just saw a failing test. Reset temp edge. */
2908 			*bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2909 			*end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2910 
2911 			/* Early exit is only applicable to DQS. */
2912 			if (search_dm)
2913 				continue;
2914 
2915 			/*
2916 			 * Early exit optimization: if the remaining delay
2917 			 * chain space is less than already seen largest
2918 			 * window we can exit.
2919 			 */
2920 			if (*win_best - 1 > IO_IO_OUT1_DELAY_MAX - new_dqs - d)
2921 				break;
2922 		}
2923 	}
2924 }
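
/*
 * Early-exit example for the DQS branch of search_window() (hypothetical
 * values): with IO_IO_OUT1_DELAY_MAX = 31, new_dqs = 5 and a best window
 * of win_best = 10 already recorded, a failing test at d = 18 leaves only
 * 31 - 5 - 18 = 8 untested settings. Since 10 - 1 > 8, no remaining
 * window could beat the best one, so the loop terminates early.
 */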
2925 
2926 /**
2927  * rw_mgr_mem_calibrate_writes_center() - Center all windows
2928  * @rank_bgn:		Rank number
2929  * @write_group:	Write group
2930  * @test_bgn:		Rank at which the test begins
2931  *
2932  * Center all windows. Do per-bit-deskew to possibly increase size of
2933  * certain windows.
2934  */
2935 static int
2936 rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
2937 				   const u32 test_bgn)
2938 {
2939 	int i;
2940 	u32 sticky_bit_chk;
2941 	u32 min_index;
2942 	int left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2943 	int right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2944 	int mid;
2945 	int mid_min, orig_mid_min;
2946 	int new_dqs, start_dqs;
2947 	int dq_margin, dqs_margin, dm_margin;
2948 	int bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2949 	int end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2950 	int bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2951 	int end_best = IO_IO_OUT1_DELAY_MAX + 1;
2952 	int win_best = 0;
2953 
2954 	int ret;
2955 
2956 	debug("%s:%d %u %u\n", __func__, __LINE__, write_group, test_bgn);
2957 
2958 	dm_margin = 0;
2959 
2960 	start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
2961 			  SCC_MGR_IO_OUT1_DELAY_OFFSET) +
2962 			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2963 
2964 	/* Per-bit deskew. */
2965 
2966 	/*
2967 	 * Set the left and right edge of each bit to an illegal value.
2968 	 * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2969 	 */
2970 	sticky_bit_chk = 0;
2971 	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2972 		left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
2973 		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2974 	}
2975 
2976 	/* Search for the left edge of the window for each bit. */
2977 	search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
2978 			 &sticky_bit_chk,
2979 			 left_edge, right_edge, 0);
2980 
2981 	/* Search for the right edge of the window for each bit. */
2982 	ret = search_right_edge(1, rank_bgn, write_group, 0,
2983 				start_dqs, 0,
2984 				&sticky_bit_chk,
2985 				left_edge, right_edge, 0);
2986 	if (ret) {
2987 		set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
2988 					CAL_SUBSTAGE_WRITES_CENTER);
2989 		return -EINVAL;
2990 	}
2991 
2992 	min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min);
2993 
2994 	/* Determine the amount we can change DQS (which is -mid_min). */
2995 	orig_mid_min = mid_min;
2996 	new_dqs = start_dqs;
2997 	mid_min = 0;
2998 	debug_cond(DLEVEL == 1,
2999 		   "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3000 		   __func__, __LINE__, start_dqs, new_dqs, mid_min);
3001 
3002 	/* Add delay to bring centre of all DQ windows to the same "level". */
3003 	center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min,
3004 			  min_index, 0, &dq_margin, &dqs_margin);
3005 
3006 	/* Move DQS */
3007 	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3008 	writel(0, &sdr_scc_mgr->update);
3009 
3010 	/* Centre DM */
3011 	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
3012 
3013 	/*
3014 	 * Set the left and right edge of each bit to an illegal value.
3015 	 * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
3016 	 */
3017 	left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
3018 	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
3019 
3020 	/* Search for the/part of the window with DM shift. */
3021 	search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr,
3022 		      &bgn_best, &end_best, &win_best, 0);
3023 
3024 	/* Reset DM delay chains to 0. */
3025 	scc_mgr_apply_group_dm_out1_delay(0);
3026 
3027 	/*
3028 	 * Check to see if the current window nudges up against 0 delay.
3029 	 * If so, we need to continue the search by shifting DQS; otherwise
3030 	 * the DQS search begins as a new search.
3031 	 */
3032 	if (end_curr != 0) {
3033 		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3034 		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3035 	}
3036 
3037 	/* Search for the/part of the window with DQS shifts. */
3038 	search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr,
3039 		      &bgn_best, &end_best, &win_best, new_dqs);
3040 
3041 	/* Assign left and right edge for cal and reporting. */
3042 	left_edge[0] = -1 * bgn_best;
3043 	right_edge[0] = end_best;
3044 
3045 	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n",
3046 		   __func__, __LINE__, left_edge[0], right_edge[0]);
3047 
3048 	/* Move DQS (back to orig). */
3049 	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3050 
3051 	/* Move DM */
3052 
3053 	/* Find middle of window for the DM bit. */
3054 	mid = (left_edge[0] - right_edge[0]) / 2;
3055 
3056 	/* Only move right, since we are not moving DQS/DQ. */
3057 	if (mid < 0)
3058 		mid = 0;
3059 
3060 	/* dm_margin must flag a failure if we never find a window. */
3061 	if (win_best == 0)
3062 		dm_margin = -1;
3063 	else
3064 		dm_margin = left_edge[0] - mid;
3065 
3066 	scc_mgr_apply_group_dm_out1_delay(mid);
3067 	writel(0, &sdr_scc_mgr->update);
3068 
3069 	debug_cond(DLEVEL == 2,
3070 		   "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3071 		   __func__, __LINE__, left_edge[0], right_edge[0],
3072 		   mid, dm_margin);
3073 	/* Export values. */
3074 	gbl->fom_out += dq_margin + dqs_margin;
3075 
3076 	debug_cond(DLEVEL == 2,
3077 		   "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3078 		   __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
3079 
3080 	/*
3081 	 * Do not remove this line as it makes sure all of our
3082 	 * decisions have been applied.
3083 	 */
3084 	writel(0, &sdr_scc_mgr->update);
3085 
3086 	if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3087 		return -EINVAL;
3088 
3089 	return 0;
3090 }
3091 
3092 /**
3093  * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3094  * @rank_bgn:		Rank number
3095  * @group:		Read/Write Group
3096  * @test_bgn:		Rank at which the test begins
3097  *
3098  * Stage 2: Write Calibration Part One.
3099  *
3100  * This function implements UniPHY calibration Stage 2, as explained in
3101  * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
3102  */
3103 static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
3104 				       const u32 test_bgn)
3105 {
3106 	int ret;
3107 
3108 	/* Update info for sims */
3109 	debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3110 
3111 	reg_file_set_group(group);
3112 	reg_file_set_stage(CAL_STAGE_WRITES);
3113 	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3114 
3115 	ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
3116 	if (ret)
3117 		set_failing_group_stage(group, CAL_STAGE_WRITES,
3118 					CAL_SUBSTAGE_WRITES_CENTER);
3119 
3120 	return ret;
3121 }
3122 
3123 /**
3124  * mem_precharge_and_activate() - Precharge all banks and activate
3125  *
3126  * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3127  */
3128 static void mem_precharge_and_activate(void)
3129 {
3130 	int r;
3131 
3132 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3133 		/* Set rank. */
3134 		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3135 
3136 		/* Precharge all banks. */
3137 		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3138 					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3139 
3140 		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3141 		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3142 			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
3143 
3144 		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3145 		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3146 			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
3147 
3148 		/* Activate rows. */
3149 		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3150 						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3151 	}
3152 }
3153 
3154 /**
3155  * mem_init_latency() - Configure memory RLAT and WLAT settings
3156  *
3157  * Configure memory RLAT and WLAT parameters.
3158  */
3159 static void mem_init_latency(void)
3160 {
3161 	/*
3162 	 * For AV/CV, LFIFO is hardened and always runs at full rate
3163 	 * so max latency in AFI clocks, used here, is correspondingly
3164 	 * smaller.
3165 	 */
3166 	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
3167 	u32 rlat, wlat;
3168 
3169 	debug("%s:%d\n", __func__, __LINE__);
3170 
3171 	/*
3172 	 * Read in write latency.
3173 	 * WL for Hard PHY does not include additive latency.
3174 	 */
3175 	wlat = readl(&data_mgr->t_wl_add);
3176 	wlat += readl(&data_mgr->mem_t_add);
3177 
3178 	gbl->rw_wl_nop_cycles = wlat - 1;
3179 
3180 	/* Read in read latency. */
3181 	rlat = readl(&data_mgr->t_rl_add);
3182 
3183 	/* Set a pretty high read latency initially. */
3184 	gbl->curr_read_lat = rlat + 16;
3185 	if (gbl->curr_read_lat > max_latency)
3186 		gbl->curr_read_lat = max_latency;
3187 
3188 	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3189 
3190 	/* Advertise write latency. */
3191 	writel(wlat, &phy_mgr_cfg->afi_wlat);
3192 }
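
/*
 * Latency setup example (hypothetical register values): with
 * t_wl_add = 5 and mem_t_add = 2, wlat = 7 and rw_wl_nop_cycles = 6.
 * With t_rl_add = 5, the initial read latency is 5 + 16 = 21, clamped
 * to max_latency if MAX_LATENCY_COUNT_WIDTH is too narrow; LFIFO
 * calibration later walks this value back down to the real minimum.
 */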
3193 
3194 /**
3195  * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3196  *
3197  * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3198  */
3199 static void mem_skip_calibrate(void)
3200 {
3201 	uint32_t vfifo_offset;
3202 	uint32_t i, j, r;
3203 
3204 	debug("%s:%d\n", __func__, __LINE__);
3205 	/* Need to update every shadow register set used by the interface */
3206 	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3207 	     r += NUM_RANKS_PER_SHADOW_REG) {
3208 		/*
3209 		 * Set output phase alignment settings appropriate for
3210 		 * skip calibration.
3211 		 */
3212 		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3213 			scc_mgr_set_dqs_en_phase(i, 0);
3214 #if IO_DLL_CHAIN_LENGTH == 6
3215 			scc_mgr_set_dqdqs_output_phase(i, 6);
3216 #else
3217 			scc_mgr_set_dqdqs_output_phase(i, 7);
3218 #endif
3219 			/*
3220 			 * Case:33398
3221 			 *
3222 			 * Write data arrives to the I/O two cycles before write
3223 			 * latency is reached (720 deg).
3224 			 *   -> due to bit-slip in a/c bus
3225 			 *   -> to allow board skew where dqs is longer than ck
3226 			 *      -> how often can this happen!?
3227 			 *      -> can claim back some ptaps for high freq
3228 			 *       support if we can relax this, but i digress...
3229 			 *
3230 			 * The write_clk leads mem_ck by 90 deg
3231 			 * The minimum ptap of the OPA is 180 deg
3232 			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
3233 			 * The write_clk is always delayed by 2 ptaps
3234 			 *
3235 			 * Hence, to make DQS aligned to CK, we need to delay
3236 			 * DQS by:
3237 			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3238 			 *
3239 			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
3240 			 * gives us the number of ptaps, which simplifies to:
3241 			 *
3242 			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3243 			 */
3244 			scc_mgr_set_dqdqs_output_phase(i,
3245 					1.25 * IO_DLL_CHAIN_LENGTH - 2);
3246 		}
3247 		writel(0xff, &sdr_scc_mgr->dqs_ena);
3248 		writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3249 
3250 		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3251 			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3252 				  SCC_MGR_GROUP_COUNTER_OFFSET);
3253 		}
3254 		writel(0xff, &sdr_scc_mgr->dq_ena);
3255 		writel(0xff, &sdr_scc_mgr->dm_ena);
3256 		writel(0, &sdr_scc_mgr->update);
3257 	}
3258 
3259 	/* Compensate for simulation model behaviour */
3260 	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3261 		scc_mgr_set_dqs_bus_in_delay(i, 10);
3262 		scc_mgr_load_dqs(i);
3263 	}
3264 	writel(0, &sdr_scc_mgr->update);
3265 
3266 	/*
3267 	 * ArriaV has hard FIFOs that can only be initialized by incrementing
3268 	 * in sequencer.
3269 	 */
3270 	vfifo_offset = CALIB_VFIFO_OFFSET;
3271 	for (j = 0; j < vfifo_offset; j++)
3272 		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3273 	writel(0, &phy_mgr_cmd->fifo_reset);
3274 
3275 	/*
3276 	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3277 	 * setting from generation-time constant.
3278 	 */
3279 	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3280 	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3281 }
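
/*
 * Numeric check of the ptap formula above, for a hypothetical
 * IO_DLL_CHAIN_LENGTH of 8: each ptap covers 360 / 8 = 45 degrees, and
 * (720 - 90 - 180 - 2 * 45) / 45 = 360 / 45 = 8 ptaps, which indeed
 * matches 1.25 * 8 - 2 = 8. Note the expression is evaluated in floating
 * point and truncated to an integer argument, so chain lengths that are
 * not multiples of four lose the fractional part.
 */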
3282 
3283 /**
3284  * mem_calibrate() - Memory calibration entry point.
3285  *
3286  * Perform memory calibration.
3287  */
3288 static uint32_t mem_calibrate(void)
3289 {
3290 	uint32_t i;
3291 	uint32_t rank_bgn, sr;
3292 	uint32_t write_group, write_test_bgn;
3293 	uint32_t read_group, read_test_bgn;
3294 	uint32_t run_groups, current_run;
3295 	uint32_t failing_groups = 0;
3296 	uint32_t group_failed = 0;
3297 
3298 	const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
3299 				RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
3300 
3301 	debug("%s:%d\n", __func__, __LINE__);
3302 
3303 	/* Initialize the data settings */
3304 	gbl->error_substage = CAL_SUBSTAGE_NIL;
3305 	gbl->error_stage = CAL_STAGE_NIL;
3306 	gbl->error_group = 0xff;
3307 	gbl->fom_in = 0;
3308 	gbl->fom_out = 0;
3309 
3310 	/* Initialize WLAT and RLAT. */
3311 	mem_init_latency();
3312 
3313 	/* Precharge all banks and activate rows. */
3314 	mem_precharge_and_activate();
3315 
3316 	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3317 		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3318 			  SCC_MGR_GROUP_COUNTER_OFFSET);
3319 		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
3320 		if (i == 0)
3321 			scc_mgr_set_hhp_extras();
3322 
3323 		scc_set_bypass_mode(i);
3324 	}
3325 
3326 	/* Calibration is skipped. */
3327 	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3328 		/*
3329 		 * Set VFIFO and LFIFO to instant-on settings in skip
3330 		 * calibration mode.
3331 		 */
3332 		mem_skip_calibrate();
3333 
3334 		/*
3335 		 * Do not remove this line as it makes sure all of our
3336 		 * decisions have been applied.
3337 		 */
3338 		writel(0, &sdr_scc_mgr->update);
3339 		return 1;
3340 	}
3341 
3342 	/* Calibration is not skipped. */
3343 	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3344 		/*
3345 		 * Zero all delay chain/phase settings for all
3346 		 * groups and all shadow register sets.
3347 		 */
3348 		scc_mgr_zero_all();
3349 
3350 		run_groups = ~0;
3351 
3352 		for (write_group = 0, write_test_bgn = 0; write_group
3353 			< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3354 			write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3355 
3356 			/* Initialize the group failure */
3357 			group_failed = 0;
3358 
3359 			current_run = run_groups & ((1 <<
3360 				RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3361 			run_groups = run_groups >>
3362 				RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3363 
3364 			if (current_run == 0)
3365 				continue;
3366 
3367 			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3368 					    SCC_MGR_GROUP_COUNTER_OFFSET);
3369 			scc_mgr_zero_group(write_group, 0);
3370 
3371 			for (read_group = write_group * rwdqs_ratio,
3372 			     read_test_bgn = 0;
3373 			     read_group < (write_group + 1) * rwdqs_ratio;
3374 			     read_group++,
3375 			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3376 				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3377 					continue;
3378 
3379 				/* Calibrate the VFIFO */
3380 				if (rw_mgr_mem_calibrate_vfifo(read_group,
3381 							       read_test_bgn))
3382 					continue;
3383 
3384 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3385 					return 0;
3386 
3387 				/* The group failed, we're done. */
3388 				goto grp_failed;
3389 			}
3390 
3391 			/* Calibrate the output side */
3392 			for (rank_bgn = 0, sr = 0;
3393 			     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
3394 			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3395 				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3396 					continue;
3397 
3398 				/* Not needed in quick mode! */
3399 				if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
3400 					continue;
3401 
3402 				/* Calibrate WRITEs */
3403 				if (!rw_mgr_mem_calibrate_writes(rank_bgn,
3404 						write_group, write_test_bgn))
3405 					continue;
3406 
3407 				group_failed = 1;
3408 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3409 					return 0;
3410 			}
3411 
3412 			/* Some group failed, we're done. */
3413 			if (group_failed)
3414 				goto grp_failed;
3415 
3416 			for (read_group = write_group * rwdqs_ratio,
3417 			     read_test_bgn = 0;
3418 			     read_group < (write_group + 1) * rwdqs_ratio;
3419 			     read_group++,
3420 			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3421 				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3422 					continue;
3423 
3424 				if (!rw_mgr_mem_calibrate_vfifo_end(read_group,
3425 								read_test_bgn))
3426 					continue;
3427 
3428 				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3429 					return 0;
3430 
3431 				/* The group failed, we're done. */
3432 				goto grp_failed;
3433 			}
3434 
3435 			/* No group failed, continue as usual. */
3436 			continue;
3437 
3438 grp_failed:		/* A group failed, increment the counter. */
3439 			failing_groups++;
3440 		}
3441 
3442 		/*
		 * If there are any failing groups then report
3444 		 * the failure.
3445 		 */
3446 		if (failing_groups != 0)
3447 			return 0;
3448 
3449 		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3450 			continue;
3451 
3452 		/* Calibrate the LFIFO */
3453 		if (!rw_mgr_mem_calibrate_lfifo())
3454 			return 0;
3455 	}
3456 
3457 	/*
3458 	 * Do not remove this line as it makes sure all of our decisions
3459 	 * have been applied.
3460 	 */
3461 	writel(0, &sdr_scc_mgr->update);
3462 	return 1;
3463 }
3464 
3465 /**
3466  * run_mem_calibrate() - Perform memory calibration
3467  *
 * This function triggers the entire memory calibration procedure.
 * Return: 1 if the calibration passed, 0 if it failed.
3469  */
3470 static int run_mem_calibrate(void)
3471 {
3472 	int pass;
3473 
3474 	debug("%s:%d\n", __func__, __LINE__);
3475 
3476 	/* Reset pass/fail status shown on afi_cal_success/fail */
3477 	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3478 
3479 	/* Stop tracking manager. */
3480 	clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3481 
3482 	phy_mgr_initialize();
3483 	rw_mgr_mem_initialize();
3484 
3485 	/* Perform the actual memory calibration. */
3486 	pass = mem_calibrate();
3487 
3488 	mem_precharge_and_activate();
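	/* Reset the PHY FIFOs before handing the interface off. */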
3489 	writel(0, &phy_mgr_cmd->fifo_reset);
3490 
3491 	/* Handoff. */
3492 	rw_mgr_mem_handoff();
3493 	/*
3494 	 * In Hard PHY this is a 2-bit control:
3495 	 * 0: AFI Mux Select
3496 	 * 1: DDIO Mux Select
3497 	 */
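	/* 0x2 sets the DDIO mux select bit and clears the AFI mux select bit. */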
3498 	writel(0x2, &phy_mgr_cfg->mux_sel);
3499 
3500 	/* Start tracking manager. */
3501 	setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3502 
3503 	return pass;
3504 }
3505 
3506 /**
3507  * debug_mem_calibrate() - Report result of memory calibration
3508  * @pass:	Value indicating whether calibration passed or failed
3509  *
3510  * This function reports the results of the memory calibration
3511  * and writes debug information into the register file.
3512  */
3513 static void debug_mem_calibrate(int pass)
3514 {
3515 	uint32_t debug_info;
3516 
3517 	if (pass) {
3518 		printf("%s: CALIBRATION PASSED\n", __FILE__);
3519 
3520 		gbl->fom_in /= 2;
3521 		gbl->fom_out /= 2;
3522 
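		/*
		 * The figures of merit are halved (presumably averaging the
		 * two window edges) and clamped so they fit the 8-bit fields
		 * packed into the register file below.
		 */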
3523 		if (gbl->fom_in > 0xff)
3524 			gbl->fom_in = 0xff;
3525 
3526 		if (gbl->fom_out > 0xff)
3527 			gbl->fom_out = 0xff;
3528 
3529 		/* Update the FOM in the register file */
3530 		debug_info = gbl->fom_in;
3531 		debug_info |= gbl->fom_out << 8;
3532 		writel(debug_info, &sdr_reg_file->fom);
3533 
3534 		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3535 		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3536 	} else {
3537 		printf("%s: CALIBRATION FAILED\n", __FILE__);
3538 
		/* Update the failing group/stage in the register file. */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3552 	}
3553 
3554 	printf("%s: Calibration complete\n", __FILE__);
3555 }
3556 
3557 /**
3558  * hc_initialize_rom_data() - Initialize ROM data
3559  *
 * Load the RW manager instruction and AC (address/command) ROMs.
3561  */
3562 static void hc_initialize_rom_data(void)
3563 {
3564 	u32 i, addr;
3565 
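	/* Each ROM entry is a 32-bit word, hence the (i << 2) byte offset. */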
3566 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3567 	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3568 		writel(inst_rom_init[i], addr + (i << 2));
3569 
3570 	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3571 	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3572 		writel(ac_rom_init[i], addr + (i << 2));
3573 }
3574 
3575 /**
3576  * initialize_reg_file() - Initialize SDR register file
3577  *
 * Write the signature and clear the debug/status fields of the register file.
3579  */
3580 static void initialize_reg_file(void)
3581 {
3582 	/* Initialize the register file with the correct data */
3583 	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3584 	writel(0, &sdr_reg_file->debug_data_addr);
3585 	writel(0, &sdr_reg_file->cur_stage);
3586 	writel(0, &sdr_reg_file->fom);
3587 	writel(0, &sdr_reg_file->failing_stage);
3588 	writel(0, &sdr_reg_file->debug1);
3589 	writel(0, &sdr_reg_file->debug2);
3590 }
3591 
3592 /**
3593  * initialize_hps_phy() - Initialize HPS PHY
3594  *
 * Program the HPS PHY control registers, including the tracking sample counters.
3596  */
3597 static void initialize_hps_phy(void)
3598 {
3599 	uint32_t reg;
3600 	/*
3601 	 * Tracking also gets configured here because it's in the
3602 	 * same register.
3603 	 */
	uint32_t trk_sample_count = 7500;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
3610 
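	/*
	 * The sample counters are wider than a single register field:
	 * bits [19:0] of trk_sample_count land in phy_ctrl0 and the rest
	 * in phy_ctrl1; the long-idle count is likewise split across
	 * phy_ctrl1 and phy_ctrl2 (see the _WIDTH shifts below).
	 */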
3611 	reg = 0;
3612 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3613 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3614 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3615 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3616 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3617 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3618 	/*
3619 	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3620 	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3621 	 */
3622 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3623 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3624 		trk_sample_count);
3625 	writel(reg, &sdr_ctrl->phy_ctrl0);
3626 
3627 	reg = 0;
3628 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3629 		trk_sample_count >>
3630 		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3631 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3632 		trk_long_idle_sample_count);
3633 	writel(reg, &sdr_ctrl->phy_ctrl1);
3634 
3635 	reg = 0;
3636 	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3637 		trk_long_idle_sample_count >>
3638 		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3639 	writel(reg, &sdr_ctrl->phy_ctrl2);
3640 }
3641 
3642 /**
3643  * initialize_tracking() - Initialize tracking
3644  *
3645  * Initialize the register file with usable initial data.
3646  */
3647 static void initialize_tracking(void)
3648 {
3649 	/*
3650 	 * Initialize the register file with the correct data.
3651 	 * Compute usable version of value in case we skip full
3652 	 * computation later.
3653 	 */
3654 	writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
3655 	       &sdr_reg_file->dtaps_per_ptap);
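	/*
	 * The seed is ceil(IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DCHAIN_TAP)
	 * minus one, i.e. the zero-based number of delay-chain taps that
	 * span one phase tap.
	 */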
3656 
3657 	/* trk_sample_count */
3658 	writel(7500, &sdr_reg_file->trk_sample_count);
3659 
	/* longidle: outer loop count [31:16], sample count [15:0] */
3661 	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3662 
3663 	/*
3664 	 * longidle sample count [31:24]
	 * tRFC, worst case of 933MHz 4Gb [23:16]
	 * tRCD, worst case [15:8]
3667 	 * vfifo wait [7:0]
3668 	 */
3669 	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3670 	       &sdr_reg_file->delays);
3671 
3672 	/* mux delay */
3673 	writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
3674 	       (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
3675 	       &sdr_reg_file->trk_rw_mgr_addr);
3676 
3677 	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
3678 	       &sdr_reg_file->trk_read_dqs_width);
3679 
	/* refresh command [31:24], trefi (1000) in the low bits */
3681 	writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
3682 	       &sdr_reg_file->trk_rfsh);
3683 }
3684 
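/**
 * sdram_calibration_full() - Execute the full SDRAM calibration
 *
 * Set up the global calibration parameters and the register file,
 * program the PHY and the RW manager ROMs, then run the calibration
 * and report the result.
 * Return: 1 if the calibration passed, 0 if it failed.
 */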
3685 int sdram_calibration_full(void)
3686 {
3687 	struct param_type my_param;
3688 	struct gbl_type my_gbl;
	int pass;
3690 
3691 	memset(&my_param, 0, sizeof(my_param));
3692 	memset(&my_gbl, 0, sizeof(my_gbl));
3693 
3694 	param = &my_param;
3695 	gbl = &my_gbl;
3696 
	/* Enable the calibration report by default. */
3698 	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default.
	 * The guaranteed read test is enabled by default and is only
	 * disabled when DISABLE_GUARANTEED_READ is set.
	 */
3703 #if DISABLE_GUARANTEED_READ
3704 	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3705 #endif
3706 	/* Initialize the register file */
3707 	initialize_reg_file();
3708 
3709 	/* Initialize any PHY CSR */
3710 	initialize_hps_phy();
3711 
3712 	scc_mgr_initialize();
3713 
3714 	initialize_tracking();
3715 
3716 	printf("%s: Preparing to start memory calibration\n", __FILE__);
3717 
3718 	debug("%s:%d\n", __func__, __LINE__);
3719 	debug_cond(DLEVEL == 1,
3720 		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3721 		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3722 		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3723 		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3724 		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3725 	debug_cond(DLEVEL == 1,
3726 		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3727 		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3728 		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3729 		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u ",
3731 		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3732 	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3733 		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3734 		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3735 	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3736 		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3737 		   IO_IO_OUT2_DELAY_MAX);
3738 	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3739 		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3740 
3741 	hc_initialize_rom_data();
3742 
	/* Update stage/group info in the register file for simulators. */
3744 	reg_file_set_stage(CAL_STAGE_NIL);
3745 	reg_file_set_group(0);
3746 
3747 	/*
3748 	 * Load global needed for those actions that require
3749 	 * some dynamic calibration support.
3750 	 */
3751 	dyn_calib_steps = STATIC_CALIB_STEPS;
3752 	/*
3753 	 * Load global to allow dynamic selection of delay loop settings
3754 	 * based on calibration mode.
3755 	 */
3756 	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3757 		skip_delay_mask = 0xff;
3758 	else
3759 		skip_delay_mask = 0x0;
3760 
3761 	pass = run_mem_calibrate();
3762 	debug_mem_calibrate(pass);
3763 	return pass;
3764 }
3765