/*
 * Copyright 2008 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <fsl_ddr_sdram.h>

#include <fsl_ddr.h>

/*
 * Calculate the Density of each Physical Rank.
 * Returned size is in bytes.
 *
 * Study this table from Byte 31 of the JEDEC SPD Spec.
 *
 *		DDR I	DDR II
 *	Bit	Size	Size
 *	---	-----	------
 *	7 high	512MB	512MB
 *	6	256MB	256MB
 *	5	128MB	128MB
 *	4	 64MB	 16GB
 *	3	 32MB	  8GB
 *	2	 16MB	  4GB
 *	1	  2GB	  2GB
 *	0 low	  1GB	  1GB
 *
 * Reorder Table to be linear by stripping the bottom
 * 2 or 5 bits off and shifting them up to the top.
 */

static unsigned long long
compute_ranksize(unsigned int mem_type, unsigned char row_dens)
{
	unsigned long long bsize;

	/* Bottom 2 bits up to the top. */
	bsize = ((row_dens >> 2) | ((row_dens & 3) << 6));
	bsize <<= 24ULL;
	debug("DDR: DDR I rank density = 0x%16llx\n", bsize);

	return bsize;
}

/*
 * Convert a two-nibble BCD value into a cycle time.
 * While the spec calls for nanoseconds, picoseconds are returned.
 *
 * This implements the tables for bytes 9, 23 and 25 for both
 * DDR I and II.  No allowance for distinguishing the invalid
 * fields absent for DDR I yet present in DDR II is made.
 * (That is, cycle times of .25, .33, .66 and .75 ns are
 * allowed for both DDR II and I.)
 */
static unsigned int
convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
{
	/* Table look up the lower nibble, allow DDR I & II. */
	unsigned int tenths_ps[16] = {
		0,
		100,
		200,
		300,
		400,
		500,
		600,
		700,
		800,
		900,
		250,	/* This and the next 3 entries valid ... */
		330,	/* ... only for tCK calculations. */
		660,
		750,
		0,	/* undefined */
		0	/* undefined */
	};

	unsigned int whole_ns = (spd_val & 0xF0) >> 4;
	unsigned int tenth_ns = spd_val & 0x0F;
	unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];

	return ps;
}

static unsigned int
convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
{
	unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
	unsigned int hundredth_ns = spd_val & 0x0F;
	unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;

	return ps;
}

static unsigned int byte40_table_ps[8] = {
	0,
	250,
	330,
	500,
	660,
	750,
	0,	/* supposed to be RFC, but not sure what that means */
	0	/* Undefined */
};

static unsigned int
compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
{
	return ((trctrfc_ext & 0x1) * 256 + trfc) * 1000
		+ byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
}

static unsigned int
compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
{
	return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
}

/*
 * tCKmax from DDR I SPD Byte 43
 *
 * Bits 7:2 == whole ns
 * Bits 1:0 == quarter ns
 *    00    == 0.00 ns
 *    01    == 0.25 ns
 *    10    == 0.50 ns
 *    11    == 0.75 ns
 *
 * Returns picoseconds.
 */
static unsigned int
compute_tckmax_from_spd_ps(unsigned int byte43)
{
	return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250;
}
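/*
 * Illustrative only: a minimal sketch showing how the SPD timing-byte
 * decoders above behave on typical values.  The FSL_DDR1_SPD_SELFTEST
 * guard is hypothetical and never defined in-tree; the example bytes
 * are arbitrary, chosen to match a 7.5 ns (DDR-266) part.
 */
#ifdef FSL_DDR1_SPD_SELFTEST
static void ddr1_spd_decode_selftest(void)
{
	/* Byte 9 == 0x75: 7 whole ns + tenths nibble 5 -> 7500 ps */
	if (convert_bcd_tenths_to_cycle_time_ps(0x75) != 7500)
		printf("tCK decode mismatch\n");

	/* Setup/hold byte 0x15: 1 tenth-ns + 5 hundredth-ns -> 150 ps */
	if (convert_bcd_hundredths_to_cycle_time_ps(0x15) != 150)
		printf("tIS/tIH decode mismatch\n");

	/* Byte 43 == 0x1e: 7 whole ns + quarter code 0b10 -> 7500 ps */
	if (compute_tckmax_from_spd_ps(0x1e) != 7500)
		printf("tCKmax decode mismatch\n");
}
#endif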
/*
 * Determine Refresh Rate.  Ignore self refresh bit on DDR I.
 * Table from SPD Spec, Byte 12, converted to picoseconds and
 * filled in with "default" normal values.
 */
static unsigned int
determine_refresh_rate_ps(const unsigned int spd_refresh)
{
	unsigned int refresh_time_ps[8] = {
		15625000,	/* 0 Normal    1.00x */
		3900000,	/* 1 Reduced    .25x */
		7800000,	/* 2 Extended   .50x */
		31300000,	/* 3 Extended  2.00x */
		62500000,	/* 4 Extended  4.00x */
		125000000,	/* 5 Extended  8.00x */
		15625000,	/* 6 Normal    1.00x  filler */
		15625000,	/* 7 Normal    1.00x  filler */
	};

	return refresh_time_ps[spd_refresh & 0x7];
}

/*
 * The purpose of this function is to compute a suitable
 * CAS latency given the DRAM clock period.  The SPD only
 * defines at most 3 CAS latencies.  Typically, the slower
 * the DIMM runs, the shorter its CAS latency can be.
 * If the DIMM is operating at a sufficiently low frequency,
 * it may be able to run at a CAS latency shorter than the
 * shortest SPD-defined CAS latency.
 *
 * If a CAS latency is not found, 0 is returned.
 *
 * Do this by finding, in the standard speed-bin table, the longest
 * tCKmin that doesn't exceed the value of mclk_ps (tCK).
 *
 * An assumption made is that the SDRAM device allows the
 * CL to be programmed for a value that is lower than those
 * advertised by the SPD.  This is not always the case,
 * as those modes not defined in the SPD are optional.
 *
 * CAS latency de-rating is based upon values from JEDEC
 * Standard No. 79-E, Table 11.
 *
 * ordinal 2, ddr1_speed_bins[1] contains tCK for CL=2
 */
				/* CL2.0 CL2.5 CL3.0 */
unsigned short ddr1_speed_bins[] = {0, 7500, 6000, 5000};

unsigned int
compute_derated_DDR1_CAS_latency(unsigned int mclk_ps)
{
	const unsigned int num_speed_bins = ARRAY_SIZE(ddr1_speed_bins);
	unsigned int lowest_tCKmin_found = 0;
	unsigned int lowest_tCKmin_CL = 0;
	unsigned int i;

	debug("mclk_ps = %u\n", mclk_ps);

	for (i = 0; i < num_speed_bins; i++) {
		unsigned int x = ddr1_speed_bins[i];
		debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
		      i, x, lowest_tCKmin_found);
		if (x && lowest_tCKmin_found <= x && x <= mclk_ps) {
			lowest_tCKmin_found = x;
			lowest_tCKmin_CL = i + 1;
		}
	}

	debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);

	return lowest_tCKmin_CL;
}
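/*
 * Illustrative only: a sketch of how the derating table above is
 * consulted.  At a 6000 ps clock (DDR-333) the loop selects the
 * 6000 ps bin (the CL 2.5 column), returning ordinal 3; at 7500 ps
 * (DDR-266) it returns ordinal 2 (CL 2.0); clocks faster than the
 * fastest bin yield 0.  The FSL_DDR1_SPD_SELFTEST guard is
 * hypothetical and never defined in-tree.
 */
#ifdef FSL_DDR1_SPD_SELFTEST
static void ddr1_derate_selftest(void)
{
	if (compute_derated_DDR1_CAS_latency(7500) != 2)
		printf("unexpected derated CL at 7500 ps\n");
	if (compute_derated_DDR1_CAS_latency(6000) != 3)
		printf("unexpected derated CL at 6000 ps\n");
	/* 4000 ps beats every bin, so no derated CL is found. */
	if (compute_derated_DDR1_CAS_latency(4000) != 0)
		printf("unexpected derated CL at 4000 ps\n");
}
#endif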
/*
 * ddr_compute_dimm_parameters for DDR1 SPD
 *
 * Compute DIMM parameters based upon the SPD information in spd.
 * Writes the results to the dimm_params_t structure pointed to by pdimm.
 *
 * FIXME: use #define for the retvals
 */
unsigned int ddr_compute_dimm_parameters(const unsigned int ctrl_num,
					 const ddr1_spd_eeprom_t *spd,
					 dimm_params_t *pdimm,
					 unsigned int dimm_number)
{
	unsigned int retval;

	if (spd->mem_type) {
		if (spd->mem_type != SPD_MEMTYPE_DDR) {
			printf("DIMM %u: is not a DDR1 SPD.\n", dimm_number);
			return 1;
		}
	} else {
		memset(pdimm, 0, sizeof(dimm_params_t));
		return 1;
	}

	retval = ddr1_spd_check(spd);
	if (retval) {
		printf("DIMM %u: failed checksum\n", dimm_number);
		return 2;
	}

	/*
	 * The part name in ASCII in the SPD EEPROM is not null terminated.
	 * Guarantee null termination here by presetting all bytes to 0
	 * and copying the part name in ASCII from the SPD onto it.
	 */
	memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
	memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);

	/* DIMM organization parameters */
	pdimm->n_ranks = spd->nrows;
	pdimm->rank_density = compute_ranksize(spd->mem_type, spd->bank_dens);
	pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
	pdimm->data_width = spd->dataw_lsb;
	pdimm->primary_sdram_width = spd->primw;
	pdimm->ec_sdram_width = spd->ecw;

	/*
	 * FIXME: Need to determine registered_dimm status.
	 *	1 == register buffered
	 *	0 == unbuffered
	 */
	pdimm->registered_dimm = 0;	/* unbuffered */

	/* SDRAM device parameters */
	pdimm->n_row_addr = spd->nrow_addr;
	pdimm->n_col_addr = spd->ncol_addr;
	pdimm->n_banks_per_sdram_device = spd->nbanks;
	pdimm->edc_config = spd->config;
	pdimm->burst_lengths_bitmask = spd->burstl;

	/*
	 * Calculate the Maximum Data Rate based on the Minimum Cycle time.
	 * The SPD clk_cycle field (tCKmin) is measured in tenths of
	 * nanoseconds and represented as BCD.
	 */
	pdimm->tckmin_x_ps
		= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
	pdimm->tckmin_x_minus_1_ps
		= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
	pdimm->tckmin_x_minus_2_ps
		= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);

	pdimm->tckmax_ps = compute_tckmax_from_spd_ps(spd->tckmax);

	/*
	 * Compute CAS latencies defined by SPD.
	 * The SPD cas_lat field should have at least 1 and at most 3
	 * bits set.
	 *
	 * If cas_lat after masking is 0, the __ilog2 function returns
	 * 255 into the variable.  This behavior is abused once.
	 */
	pdimm->caslat_x = __ilog2(spd->cas_lat);
	pdimm->caslat_x_minus_1 = __ilog2(spd->cas_lat
					  & ~(1 << pdimm->caslat_x));
	pdimm->caslat_x_minus_2 = __ilog2(spd->cas_lat
					  & ~(1 << pdimm->caslat_x)
					  & ~(1 << pdimm->caslat_x_minus_1));

	/* Compute CAS latencies below those defined by the SPD */
	pdimm->caslat_lowest_derated = compute_derated_DDR1_CAS_latency(
					get_memory_clk_period_ps(ctrl_num));

	/* Compute timing parameters */
	pdimm->trcd_ps = spd->trcd * 250;
	pdimm->trp_ps = spd->trp * 250;
	pdimm->tras_ps = spd->tras * 1000;

	pdimm->twr_ps = mclk_to_picos(ctrl_num, 3);
	pdimm->twtr_ps = mclk_to_picos(ctrl_num, 1);
	pdimm->trfc_ps = compute_trfc_ps_from_spd(0, spd->trfc);

	pdimm->trrd_ps = spd->trrd * 250;
	pdimm->trc_ps = compute_trc_ps_from_spd(0, spd->trc);

	pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);

	pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
	pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
	pdimm->tds_ps
		= convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
	pdimm->tdh_ps
		= convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);

	pdimm->trtp_ps = mclk_to_picos(ctrl_num, 2);	/* By the book. */
	pdimm->tdqsq_max_ps = spd->tdqsq * 10;
	pdimm->tqhs_ps = spd->tqhs * 10;

	return 0;
}
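/*
 * Illustrative only: a minimal sketch of how a caller might feed a raw
 * DDR1 SPD image into ddr_compute_dimm_parameters().  The real caller
 * lives in the common fsl_ddr code; ddr1_example_probe() and the
 * FSL_DDR1_SPD_SELFTEST guard are hypothetical, and the controller and
 * DIMM numbers below are arbitrary example values.
 */
#ifdef FSL_DDR1_SPD_SELFTEST
static int ddr1_example_probe(const ddr1_spd_eeprom_t *spd)
{
	dimm_params_t pdimm;

	/* ctrl_num 0 and DIMM number 0 are arbitrary example values */
	if (ddr_compute_dimm_parameters(0, spd, &pdimm, 0)) {
		printf("DIMM 0: SPD decode failed\n");
		return -1;
	}

	printf("DIMM 0: %u rank(s), 0x%llx bytes, tCKmin %u ps\n",
	       pdimm.n_ranks, pdimm.capacity, pdimm.tckmin_x_ps);
	return 0;
}
#endif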