/*
 * Copyright 2008-2014 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * Version 2 as published by the Free Software Foundation.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>

#include <fsl_ddr.h>

#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
static unsigned int
compute_cas_latency(const dimm_params_t *dimm_params,
		    common_timing_params_t *outpdimm,
		    unsigned int number_of_dimms)
{
	unsigned int i;
	unsigned int common_caslat;
	unsigned int caslat_actual;
	unsigned int retry = 16;
	unsigned int tmp;
	const unsigned int mclk_ps = get_memory_clk_period_ps();
#ifdef CONFIG_SYS_FSL_DDR3
	const unsigned int taamax = 20000;
#else
	const unsigned int taamax = 18000;
#endif

	/* compute the common CAS latency supported between slots */
	tmp = dimm_params[0].caslat_x;
	for (i = 1; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks)
			tmp &= dimm_params[i].caslat_x;
	}
	common_caslat = tmp;
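	/*
	 * caslat_x is a bitmask of the CAS latencies each DIMM supports,
	 * so the bitwise AND above leaves only the latencies supported by
	 * every populated DIMM.
	 */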

	/* Validate that the memory clock is within the range the DIMMs support. */
	if (mclk_ps < outpdimm->tckmin_x_ps) {
		printf("DDR clock (MCLK cycle %u ps) is faster than the slowest DIMM(s) (tCKmin %u ps) can support.\n",
		       mclk_ps, outpdimm->tckmin_x_ps);
	}
#ifdef CONFIG_SYS_FSL_DDR4
	if (mclk_ps > outpdimm->tckmax_ps) {
		printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n",
		       mclk_ps, outpdimm->tckmax_ps);
	}
#endif
	/* Determine the actual CAS latency by rounding tAAmin up to whole clocks. */
	caslat_actual = (outpdimm->taamin_ps + mclk_ps - 1) / mclk_ps;
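	/*
	 * For example, with mclk_ps = 1250 (DDR3-1600) and taamin_ps = 13750,
	 * this rounds up to CL = 11.
	 */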
	/* If the DIMMs do not support this CAS latency, bump it up until they do. */
	while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
		caslat_actual++;
		retry--;
	}
	/*
	 * Once the calculation of caslat_actual is completed,
	 * we must verify that this CAS latency value does not
	 * exceed tAAmax, which is 20 ns for all DDR3 speed grades
	 * and 18 ns for all DDR4 speed grades.
	 */
	if (caslat_actual * mclk_ps > taamax) {
		printf("The chosen CAS latency %u is too large\n",
		       caslat_actual);
	}
	outpdimm->lowest_common_spd_caslat = caslat_actual;
	debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual);

	return 0;
}
#else	/* for DDR1 and DDR2 */
static unsigned int
compute_cas_latency(const dimm_params_t *dimm_params,
		    common_timing_params_t *outpdimm,
		    unsigned int number_of_dimms)
{
	int i;
	const unsigned int mclk_ps = get_memory_clk_period_ps();
	unsigned int lowest_good_caslat;
	unsigned int not_ok;
	unsigned int temp1, temp2;

	debug("using mclk_ps = %u\n", mclk_ps);
	if (mclk_ps > outpdimm->tckmax_ps) {
		printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n",
		       mclk_ps, outpdimm->tckmax_ps);
	}

	/*
	 * Compute a CAS latency suitable for all DIMMs.
	 *
	 * Strategy for SPD-defined latencies: consider only the
	 * CAS latencies defined by all DIMMs.
	 */

	/*
	 * Step 1: find the CAS latencies common to all DIMMs using a bitwise
	 * operation.
	 */
	temp1 = 0xFF;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp2 = 0;
			temp2 |= 1 << dimm_params[i].caslat_x;
			temp2 |= 1 << dimm_params[i].caslat_x_minus_1;
			temp2 |= 1 << dimm_params[i].caslat_x_minus_2;
			/*
			 * If there was no entry for X-2 (X-1) in
			 * the SPD, then caslat_x_minus_2
			 * (caslat_x_minus_1) contains either 255 or
			 * 0xFFFFFFFF, because that's what the glorious
			 * __ilog2 function returns for an input of 0.
			 * On 32-bit PowerPC, a left shift count with bit
			 * 26 set (which the values 255 and 0xFFFFFFFF
			 * both have) causes the destination register to
			 * be 0.  That is why this works.
			 */
			temp1 &= temp2;
		}
	}

	/*
	 * Step 2: check each common CAS latency against tCK of each
	 * DIMM's SPD.
	 */
	lowest_good_caslat = 0;
	temp2 = 0;
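	/*
	 * Walk the common CAS latencies from highest to lowest; each pass
	 * clears the bit it just checked, and lowest_good_caslat is updated
	 * whenever a latency works on every populated DIMM, so the lowest
	 * workable value remains after the loop.
	 */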
	while (temp1) {
		not_ok = 0;
		temp2 = __ilog2(temp1);
		debug("checking common caslat = %u\n", temp2);

		/* Check if this CAS latency will work on all DIMMs at tCK. */
		for (i = 0; i < number_of_dimms; i++) {
			if (!dimm_params[i].n_ranks)
				continue;

			if (dimm_params[i].caslat_x == temp2) {
				if (mclk_ps >= dimm_params[i].tckmin_x_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n",
					      temp2, i, mclk_ps,
					      dimm_params[i].tckmin_x_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_x_minus_1 == temp2) {
				unsigned int tckmin_x_minus_1_ps
					= dimm_params[i].tckmin_x_minus_1_ps;
				if (mclk_ps >= tckmin_x_minus_1_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n",
					      temp2, i, mclk_ps,
					      tckmin_x_minus_1_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_x_minus_2 == temp2) {
				unsigned int tckmin_x_minus_2_ps
					= dimm_params[i].tckmin_x_minus_2_ps;
				if (mclk_ps >= tckmin_x_minus_2_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n",
					      temp2, i, mclk_ps,
					      tckmin_x_minus_2_ps);
					continue;
				} else {
					not_ok++;
				}
			}
		}

		if (!not_ok)
			lowest_good_caslat = temp2;

		temp1 &= ~(1 << temp2);
	}

	debug("lowest common SPD-defined CAS latency = %u\n",
	      lowest_good_caslat);
	outpdimm->lowest_common_spd_caslat = lowest_good_caslat;

	/*
	 * Compute a common 'de-rated' CAS latency.
	 *
	 * The strategy here is to find the *highest* derated CAS latency,
	 * with the assumption that all of the DIMMs will support a derated
	 * CAS latency higher than or equal to their lowest derated value.
	 */
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++)
		temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);

	outpdimm->highest_common_derated_caslat = temp1;
	debug("highest common derated CAS latency = %u\n", temp1);

	return 0;
}
#endif

/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params.
 */
unsigned int
compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
				      common_timing_params_t *outpdimm,
				      const unsigned int number_of_dimms)
{
	unsigned int i, j;

	unsigned int tckmin_x_ps = 0;
	unsigned int tckmax_ps = 0xFFFFFFFF;
	unsigned int trcd_ps = 0;
	unsigned int trp_ps = 0;
	unsigned int tras_ps = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	unsigned int taamin_ps = 0;
#endif
#ifdef CONFIG_SYS_FSL_DDR4
	unsigned int twr_ps = 15000;
	unsigned int trfc1_ps = 0;
	unsigned int trfc2_ps = 0;
	unsigned int trfc4_ps = 0;
	unsigned int trrds_ps = 0;
	unsigned int trrdl_ps = 0;
	unsigned int tccdl_ps = 0;
#else
	unsigned int twr_ps = 0;
	unsigned int twtr_ps = 0;
	unsigned int trfc_ps = 0;
	unsigned int trrd_ps = 0;
	unsigned int trtp_ps = 0;
#endif
	unsigned int trc_ps = 0;
	unsigned int refresh_rate_ps = 0;
	unsigned int extended_op_srt = 1;
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	unsigned int tis_ps = 0;
	unsigned int tih_ps = 0;
	unsigned int tds_ps = 0;
	unsigned int tdh_ps = 0;
	unsigned int tdqsq_max_ps = 0;
	unsigned int tqhs_ps = 0;
#endif
	unsigned int temp1, temp2;
	unsigned int additive_latency = 0;

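	/* temp1 counts the DIMM slots that are empty or must be ignored. */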
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		/*
		 * If there are no ranks on this DIMM,
		 * it probably doesn't exist, so skip it.
		 */
		if (dimm_params[i].n_ranks == 0) {
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 && i != 0) {
			printf("Found Quad-rank DIMM in wrong bank, ignored."
				" Software may not run as expected.\n");
			temp1++;
			continue;
		}

		/*
		 * Check whether a quad-rank DIMM is plugged in when
		 * CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE is not defined.
		 * Only boards with the proper design are capable of it.
		 */
#ifndef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
		if (dimm_params[i].n_ranks == 4 &&
		    CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) {
			printf("Found Quad-rank DIMM, not able to support.\n");
			temp1++;
			continue;
		}
#endif
		/*
		 * Find minimum tckmax_ps to find fastest slow speed,
		 * i.e., this is the slowest the whole system can go.
		 */
		tckmax_ps = min(tckmax_ps,
				(unsigned int)dimm_params[i].tckmax_ps);
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
		taamin_ps = max(taamin_ps,
				(unsigned int)dimm_params[i].taa_ps);
#endif
		tckmin_x_ps = max(tckmin_x_ps,
				  (unsigned int)dimm_params[i].tckmin_x_ps);
		trcd_ps = max(trcd_ps, (unsigned int)dimm_params[i].trcd_ps);
		trp_ps = max(trp_ps, (unsigned int)dimm_params[i].trp_ps);
		tras_ps = max(tras_ps, (unsigned int)dimm_params[i].tras_ps);
#ifdef CONFIG_SYS_FSL_DDR4
		trfc1_ps = max(trfc1_ps,
			       (unsigned int)dimm_params[i].trfc1_ps);
		trfc2_ps = max(trfc2_ps,
			       (unsigned int)dimm_params[i].trfc2_ps);
		trfc4_ps = max(trfc4_ps,
			       (unsigned int)dimm_params[i].trfc4_ps);
		trrds_ps = max(trrds_ps,
			       (unsigned int)dimm_params[i].trrds_ps);
		trrdl_ps = max(trrdl_ps,
			       (unsigned int)dimm_params[i].trrdl_ps);
		tccdl_ps = max(tccdl_ps,
			       (unsigned int)dimm_params[i].tccdl_ps);
#else
		twr_ps = max(twr_ps, (unsigned int)dimm_params[i].twr_ps);
		twtr_ps = max(twtr_ps, (unsigned int)dimm_params[i].twtr_ps);
		trfc_ps = max(trfc_ps, (unsigned int)dimm_params[i].trfc_ps);
		trrd_ps = max(trrd_ps, (unsigned int)dimm_params[i].trrd_ps);
		trtp_ps = max(trtp_ps, (unsigned int)dimm_params[i].trtp_ps);
#endif
		trc_ps = max(trc_ps, (unsigned int)dimm_params[i].trc_ps);
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
		tis_ps = max(tis_ps, (unsigned int)dimm_params[i].tis_ps);
		tih_ps = max(tih_ps, (unsigned int)dimm_params[i].tih_ps);
		tds_ps = max(tds_ps, (unsigned int)dimm_params[i].tds_ps);
		tdh_ps = max(tdh_ps, (unsigned int)dimm_params[i].tdh_ps);
		tqhs_ps = max(tqhs_ps, (unsigned int)dimm_params[i].tqhs_ps);
		/*
		 * Find maximum tdqsq_max_ps to find slowest.
		 *
		 * FIXME: is finding the slowest value the correct
		 * strategy for this parameter?
		 */
		tdqsq_max_ps = max(tdqsq_max_ps,
				   (unsigned int)dimm_params[i].tdqsq_max_ps);
#endif
		refresh_rate_ps = max(refresh_rate_ps,
				      (unsigned int)dimm_params[i].refresh_rate_ps);
		/* extended_op_srt is either 0 or 1, 0 having priority */
		extended_op_srt = min(extended_op_srt,
				      (unsigned int)dimm_params[i].extended_op_srt);
	}

	outpdimm->ndimms_present = number_of_dimms - temp1;

	if (temp1 == number_of_dimms) {
		debug("no DIMMs on this memory controller\n");
		return 0;
	}

	outpdimm->tckmin_x_ps = tckmin_x_ps;
	outpdimm->tckmax_ps = tckmax_ps;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	outpdimm->taamin_ps = taamin_ps;
#endif
	outpdimm->trcd_ps = trcd_ps;
	outpdimm->trp_ps = trp_ps;
	outpdimm->tras_ps = tras_ps;
#ifdef CONFIG_SYS_FSL_DDR4
	outpdimm->trfc1_ps = trfc1_ps;
	outpdimm->trfc2_ps = trfc2_ps;
	outpdimm->trfc4_ps = trfc4_ps;
	outpdimm->trrds_ps = trrds_ps;
	outpdimm->trrdl_ps = trrdl_ps;
	outpdimm->tccdl_ps = tccdl_ps;
#else
	outpdimm->twtr_ps = twtr_ps;
	outpdimm->trfc_ps = trfc_ps;
	outpdimm->trrd_ps = trrd_ps;
	outpdimm->trtp_ps = trtp_ps;
#endif
	outpdimm->twr_ps = twr_ps;
	outpdimm->trc_ps = trc_ps;
	outpdimm->refresh_rate_ps = refresh_rate_ps;
	outpdimm->extended_op_srt = extended_op_srt;
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	outpdimm->tis_ps = tis_ps;
	outpdimm->tih_ps = tih_ps;
	outpdimm->tds_ps = tds_ps;
	outpdimm->tdh_ps = tdh_ps;
	outpdimm->tdqsq_max_ps = tdqsq_max_ps;
	outpdimm->tqhs_ps = tqhs_ps;
#endif

	/* Determine common burst length for all DIMMs. */
	temp1 = 0xff;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp1 &= dimm_params[i].burst_lengths_bitmask;
		}
	}
	outpdimm->all_dimms_burst_lengths_bitmask = temp1;

	/* Determine whether all DIMMs are registered (buffered) or unbuffered. */
	temp1 = temp2 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			if (dimm_params[i].registered_dimm) {
				temp1 = 1;
#ifndef CONFIG_SPL_BUILD
				printf("Detected RDIMM %s\n",
				       dimm_params[i].mpart);
#endif
			} else {
				temp2 = 1;
#ifndef CONFIG_SPL_BUILD
				printf("Detected UDIMM %s\n",
				       dimm_params[i].mpart);
#endif
			}
		}
	}

	outpdimm->all_dimms_registered = 0;
	outpdimm->all_dimms_unbuffered = 0;
	if (temp1 && !temp2) {
		outpdimm->all_dimms_registered = 1;
	} else if (!temp1 && temp2) {
		outpdimm->all_dimms_unbuffered = 1;
	} else {
		printf("ERROR: Mix of registered (buffered) and unbuffered DIMMs detected!\n");
	}

	temp1 = 0;
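	/*
	 * For registered DIMMs, every DIMM must report identical register
	 * control words (RCWs); a mismatch is flagged below.
	 */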
	if (outpdimm->all_dimms_registered)
		for (j = 0; j < 16; j++) {
			outpdimm->rcw[j] = dimm_params[0].rcw[j];
			for (i = 1; i < number_of_dimms; i++) {
				if (!dimm_params[i].n_ranks)
					continue;
				if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
					temp1 = 1;
					break;
				}
			}
		}

	if (temp1 != 0)
		printf("ERROR: Mix of different RDIMMs detected!\n");

	/* Calculate the CAS latency for all DDR types. */
	if (compute_cas_latency(dimm_params, outpdimm, number_of_dimms))
		return 1;

	/* Determine if all DIMMs are ECC capable. */
	temp1 = 1;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks &&
		    !(dimm_params[i].edc_config & EDC_ECC)) {
			temp1 = 0;
			break;
		}
	}
	if (temp1) {
		debug("all DIMMs ECC capable\n");
	} else {
		debug("Warning: not all DIMMs ECC capable, can't enable ECC\n");
	}
	outpdimm->all_dimms_ecc_capable = temp1;

	/*
	 * Compute additive latency.
	 *
	 * For DDR1, additive latency should be 0.
	 *
	 * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
	 *	which comes from tRCD, and also note that:
	 *	    add_lat + caslat must be >= 4
	 *
	 * For DDR3, we use AL=0.
	 *
	 * When to use additive latency for DDR2:
	 *
	 * I. Because you are using CL=3 and need to do ODT on writes and
	 *    want functionality.
	 *    1. Are you going to use ODT? (Does your board not have
	 *      additional termination circuitry for DQ, DQS, DQS_,
	 *      DM, RDQS, RDQS_ for x4/x8 configs?)
	 *    2. If so, is your lowest supported CL going to be 3?
	 *    3. If so, then you must set AL=1 because
	 *
	 *       WL >= 3 for ODT on writes
	 *       RL = AL + CL
	 *       WL = RL - 1
	 *       ->
	 *       WL = AL + CL - 1
	 *       AL + CL - 1 >= 3
	 *       AL + CL >= 4
	 *  QED
	 *
	 *  RL >= 3 for ODT on reads
	 *  RL = AL + CL
	 *
	 *  Since CL is not usually less than 2, AL=0 is a minimum,
	 *  so the WL-derived AL should be the  -- FIXME?
	 *
	 * II. Because you are using auto-precharge globally and want to
	 *     use additive latency (posted CAS) to get more bandwidth.
	 *     1. Are you going to use auto-precharge mode globally?
	 *
	 *        Use additive latency and compute AL to be 1 cycle less than
	 *        tRCD, i.e. the READ or WRITE command is in the cycle
	 *        immediately following the ACTIVATE command.
	 *
	 * III. Because you feel like it or want to do some sort of
	 *      degraded-performance experiment.
	 *     1.  Do you just want to use additive latency because you feel
	 *         like it?
	 *
	 * Validation:  AL is less than tRCD, and within the other
	 * read-to-precharge constraints.
	 */

	additive_latency = 0;

#if defined(CONFIG_SYS_FSL_DDR2)
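	/*
	 * Illustrative example: with CL = 3 and tRCD = 4 clocks, this sets
	 * AL = 4 - 3 = 1, so WL = AL + CL - 1 = 3, satisfying the
	 * "AL + CL >= 4" requirement for ODT on writes described above.
	 */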
	if ((outpdimm->lowest_common_spd_caslat < 4) &&
	    (picos_to_mclk(trcd_ps) > outpdimm->lowest_common_spd_caslat)) {
		additive_latency = picos_to_mclk(trcd_ps) -
				   outpdimm->lowest_common_spd_caslat;
		if (mclk_to_picos(additive_latency) > trcd_ps) {
			additive_latency = picos_to_mclk(trcd_ps);
			debug("setting additive_latency to %u because it was greater than tRCD_ps\n",
			      additive_latency);
		}
	}
#endif

	/*
	 * Validate additive latency
	 *
	 * AL <= tRCD(min)
	 */
	if (mclk_to_picos(additive_latency) > trcd_ps) {
		printf("Error: invalid additive latency exceeds tRCD(min).\n");
		return 1;
	}

	/*
	 * RL = CL + AL;  RL >= 3 for ODT_RD_CFG to be enabled.
	 * WL = RL - 1;  WL >= 3 for ODT_WL_CFG to be enabled.
	 * ADD_LAT (the register) must be set to a value less
	 * than ACTTORW; if WL = 1, then AL must be set to 1.
	 * RD_TO_PRE (the register) must be set to a minimum of
	 * tRTP + AL if AL is nonzero.
	 */

	/*
	 * Additive latency will be applied only if the memctl option to
	 * use it is set.
	 */
	outpdimm->additive_latency = additive_latency;

	debug("tCKmin_ps = %u\n", outpdimm->tckmin_x_ps);
	debug("trcd_ps   = %u\n", outpdimm->trcd_ps);
	debug("trp_ps    = %u\n", outpdimm->trp_ps);
	debug("tras_ps   = %u\n", outpdimm->tras_ps);
#ifdef CONFIG_SYS_FSL_DDR4
	debug("trfc1_ps = %u\n", trfc1_ps);
	debug("trfc2_ps = %u\n", trfc2_ps);
	debug("trfc4_ps = %u\n", trfc4_ps);
	debug("trrds_ps = %u\n", trrds_ps);
	debug("trrdl_ps = %u\n", trrdl_ps);
	debug("tccdl_ps = %u\n", tccdl_ps);
#else
	debug("twtr_ps   = %u\n", outpdimm->twtr_ps);
	debug("trfc_ps   = %u\n", outpdimm->trfc_ps);
	debug("trrd_ps   = %u\n", outpdimm->trrd_ps);
#endif
	debug("twr_ps    = %u\n", outpdimm->twr_ps);
	debug("trc_ps    = %u\n", outpdimm->trc_ps);

	return 0;
}