// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */
#include "ddr_ml_wrapper.h"
#include "mv_ddr_plat.h"

#include "mv_ddr_topology.h"
#include "mv_ddr_common.h"
#include "mv_ddr_spd.h"
#include "ddr_topology_def.h"
#include "ddr3_training_ip_db.h"
#include "ddr3_training_ip.h"
#include "mv_ddr_training_db.h"

/* calc actual cas latency (cl) from taa_min and tclk (both in ps) */
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk)
{
	unsigned int cl = ceil_div(taa_min, tclk);

	return mv_ddr_spd_supported_cl_get(cl);
}

/* calc cas write latency (cwl) per jedec ddr4 first cwl set; tclk is in ps */
unsigned int mv_ddr_cwl_calc(unsigned int tclk)
{
	unsigned int cwl;

	if (tclk >= 1250)
		cwl = 9;
	else if (tclk >= 1071)
		cwl = 10;
	else if (tclk >= 938)
		cwl = 11;
	else if (tclk >= 833)
		cwl = 12;
	else if (tclk >= 750)
		cwl = 14;
	else if (tclk >= 625)
		cwl = 16;
	else
		cwl = 0;

	return cwl;
}

int mv_ddr_topology_map_update(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	enum mv_ddr_speed_bin speed_bin_index;
	enum mv_ddr_freq freq = MV_DDR_FREQ_LAST;
	unsigned int tclk;
	unsigned char val = 0;
	int i;

	if (iface_params->memory_freq == MV_DDR_FREQ_SAR)
		iface_params->memory_freq = mv_ddr_init_freq_get();

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		/* check dram device type */
		val = mv_ddr_spd_dev_type_get(&tm->spd_data);
		if (val != MV_DDR_SPD_DEV_TYPE_DDR4) {
			printf("mv_ddr: unsupported dram device type found\n");
			return -1;
		}

		/* update topology map with timing data */
		if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) {
			printf("mv_ddr: negative timing data found\n");
			return -1;
		}

		/* update device width in topology map */
		iface_params->bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data);

		/* update die capacity in topology map */
		iface_params->memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data);

		/* update bus bit mask in topology map */
		tm->bus_act_mask = mv_ddr_bus_bit_mask_get();

		/* update cs bit mask in topology map */
		val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++)
			iface_params->as_bus_params[i].cs_bitmask = val;

		/* check dram module type */
		val = mv_ddr_spd_module_type_get(&tm->spd_data);
		switch (val) {
		case MV_DDR_SPD_MODULE_TYPE_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM:
			break;
		default:
			printf("mv_ddr: unsupported dram module type found\n");
			return -1;
		}

		/* update mirror bit mask in topology map */
		val = mv_ddr_spd_mem_mirror_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++)
			iface_params->as_bus_params[i].mirror_enable_bitmask = val << 1;

		/* calc tclk in ps from the interface frequency in mhz */
		tclk = 1000000 / mv_ddr_freq_get(iface_params->memory_freq);

		/* update cas write latency (cwl) */
		val = mv_ddr_cwl_calc(tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas write latency value found\n");
			return -1;
		}
		iface_params->cas_wl = val;

		/* update cas latency (cl) */
		mv_ddr_spd_supported_cls_calc(&tm->spd_data);
		val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas latency value found\n");
			return -1;
		}
		iface_params->cas_l = val;
	} else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) {
		/* set cas and cas-write latencies per speed bin, if they are unset */
		speed_bin_index = iface_params->speed_bin_index;
		freq = iface_params->memory_freq;

		if (iface_params->cas_l == 0)
			iface_params->cas_l = mv_ddr_cl_val_get(speed_bin_index, freq);

		if (iface_params->cas_wl == 0)
			iface_params->cas_wl = mv_ddr_cwl_val_get(speed_bin_index, freq);
	}

	return 0;
}

unsigned short mv_ddr_bus_bit_mask_get(void)
{
	unsigned short pri_and_ext_bus_width = 0x0;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		enum mv_ddr_pri_bus_width pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data);
		enum mv_ddr_bus_width_ext bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data);

		switch (pri_bus_width) {
		case MV_DDR_PRI_BUS_WIDTH_16:
			pri_and_ext_bus_width = BUS_MASK_16BIT;
			break;
		case MV_DDR_PRI_BUS_WIDTH_32:
			pri_and_ext_bus_width = BUS_MASK_32BIT;
			break;
		case MV_DDR_PRI_BUS_WIDTH_64:
			pri_and_ext_bus_width = MV_DDR_64BIT_BUS_MASK;
			break;
		default:
			pri_and_ext_bus_width = 0x0;
		}

		/* an 8-bit bus width extension (ecc byte) maps to the last subphy */
		if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8)
			pri_and_ext_bus_width |= 1 << (octets_per_if_num - 1);
	}

	return pri_and_ext_bus_width;
}

unsigned int mv_ddr_if_bus_width_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int bus_width;

	switch (tm->bus_act_mask) {
	case BUS_MASK_16BIT:
	case BUS_MASK_16BIT_ECC:
	case BUS_MASK_16BIT_ECC_PUP3:
		bus_width = 16;
		break;
	case BUS_MASK_32BIT:
	case BUS_MASK_32BIT_ECC:
	case MV_DDR_32BIT_ECC_PUP8_BUS_MASK:
		bus_width = 32;
		break;
	case MV_DDR_64BIT_BUS_MASK:
	case MV_DDR_64BIT_ECC_PUP8_BUS_MASK:
		bus_width = 64;
		break;
	default:
		printf("mv_ddr: unsupported bus active mask parameter found\n");
		bus_width = 0;
	}

	return bus_width;
}

unsigned int mv_ddr_cs_num_get(void)
{
	unsigned int cs_num = 0;
	unsigned int cs, sphy;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);
	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);

	/* find the first active subphy to read its cs bitmask from */
	for (sphy = 0; sphy < sphy_max; sphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sphy);
		break;
	}

	/* count the active chip selects on that subphy */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		VALIDATE_ACTIVE(iface_params->as_bus_params[sphy].cs_bitmask, cs);
		cs_num++;
	}

	return cs_num;
}

int mv_ddr_is_ecc_ena(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
	    DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask) ||
	    DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))
		return 1;
	else
		return 0;
}

/* translate topology map definition to real memory size in bits */
static unsigned int mem_size[] = {
	ADDR_SIZE_512MB,
	ADDR_SIZE_1GB,
	ADDR_SIZE_2GB,
	ADDR_SIZE_4GB,
	ADDR_SIZE_8GB
	/* TODO: add capacity up to 256GB */
};

unsigned long long mv_ddr_mem_sz_per_cs_get(void)
{
	unsigned long long mem_sz_per_cs;
	unsigned int i, sphys, sphys_per_dunit;
	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);

	/* calc number of active subphys excl. ecc one */
	for (i = 0, sphys = 0; i < sphy_max - 1; i++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
		sphys++;
	}

	/* calc number of subphys per ddr unit */
	if (iface_params->bus_width == MV_DDR_DEV_WIDTH_8BIT)
		sphys_per_dunit = MV_DDR_ONE_SPHY_PER_DUNIT;
	else if (iface_params->bus_width == MV_DDR_DEV_WIDTH_16BIT)
		sphys_per_dunit = MV_DDR_TWO_SPHY_PER_DUNIT;
	else {
		printf("mv_ddr: unsupported bus width type found\n");
		return 0;
	}

	/* calc dram size per cs */
	mem_sz_per_cs = (unsigned long long)mem_size[iface_params->memory_size] *
			(unsigned long long)sphys /
			(unsigned long long)sphys_per_dunit;

	return mem_sz_per_cs;
}

unsigned long long mv_ddr_mem_sz_get(void)
{
	unsigned long long tot_mem_sz = 0;
	unsigned long long mem_sz_per_cs = 0;
	unsigned long long max_cs = mv_ddr_cs_num_get();

	mem_sz_per_cs = mv_ddr_mem_sz_per_cs_get();
	tot_mem_sz = max_cs * mem_sz_per_cs;

	return tot_mem_sz;
}

unsigned int mv_ddr_rtt_nom_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int rtt_nom = tm->edata.mem_edata.rtt_nom;

	if (rtt_nom >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) {
		printf("error: %s: unsupported rtt_nom parameter found\n", __func__);
		rtt_nom = PARAM_UNDEFINED;
	}

	return rtt_nom;
}

unsigned int mv_ddr_rtt_park_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int cs_num = mv_ddr_cs_num_get();
	unsigned int rtt_park = MV_DDR_RTT_NOM_PARK_RZQ_LAST;

	if (cs_num > 0 && cs_num <= MAX_CS_NUM)
		rtt_park = tm->edata.mem_edata.rtt_park[cs_num - 1];

	if (rtt_park >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) {
		printf("error: %s: unsupported rtt_park parameter found\n", __func__);
		rtt_park = PARAM_UNDEFINED;
	}

	return rtt_park;
}

unsigned int mv_ddr_rtt_wr_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int cs_num = mv_ddr_cs_num_get();
	unsigned int rtt_wr = MV_DDR_RTT_WR_RZQ_LAST;

	if (cs_num > 0 && cs_num <= MAX_CS_NUM)
		rtt_wr = tm->edata.mem_edata.rtt_wr[cs_num - 1];

	if (rtt_wr >= MV_DDR_RTT_WR_RZQ_LAST) {
		printf("error: %s: unsupported rtt_wr parameter found\n", __func__);
		rtt_wr = PARAM_UNDEFINED;
	}

	return rtt_wr;
}

unsigned int mv_ddr_dic_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int dic = tm->edata.mem_edata.dic;

	if (dic >= MV_DDR_DIC_RZQ_LAST) {
		printf("error: %s: unsupported dic parameter found\n", __func__);
		dic = PARAM_UNDEFINED;
	}

	return dic;
}
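
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * a typical platform bring-up path updates the topology map first,
 * then queries the derived geometry. The function name below is
 * hypothetical and the block is excluded from the build; it only
 * shows how the helpers in this file combine.
 */
#if 0
static int mv_ddr_topology_example_init(void)
{
	unsigned long long mem_sz;

	/* derive timings, widths, and cs/mirror masks from spd or defaults */
	if (mv_ddr_topology_map_update() < 0)
		return -1;

	/* total dram size: size per chip select times number of chip selects */
	mem_sz = mv_ddr_mem_sz_get();
	if (mem_sz == 0)
		return -1;

	printf("mv_ddr: %u-bit bus, %u cs, ecc %s\n",
	       mv_ddr_if_bus_width_get(), mv_ddr_cs_num_get(),
	       mv_ddr_is_ecc_ena() ? "on" : "off");

	return 0;
}
#endif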