1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module 2 * 3 * This driver supports the memory controllers found on the Intel 4 * processor family Sandy Bridge. 5 * 6 * This file may be distributed under the terms of the 7 * GNU General Public License version 2 only. 8 * 9 * Copyright (c) 2011 by: 10 * Mauro Carvalho Chehab 11 */ 12 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/pci.h> 16 #include <linux/pci_ids.h> 17 #include <linux/slab.h> 18 #include <linux/delay.h> 19 #include <linux/edac.h> 20 #include <linux/mmzone.h> 21 #include <linux/smp.h> 22 #include <linux/bitmap.h> 23 #include <linux/math64.h> 24 #include <linux/mod_devicetable.h> 25 #include <asm/cpu_device_id.h> 26 #include <asm/intel-family.h> 27 #include <asm/processor.h> 28 #include <asm/mce.h> 29 30 #include "edac_module.h" 31 32 /* Static vars */ 33 static LIST_HEAD(sbridge_edac_list); 34 35 /* 36 * Alter this version for the module when modifications are made 37 */ 38 #define SBRIDGE_REVISION " Ver: 1.1.1 " 39 #define EDAC_MOD_STR "sbridge_edac" 40 41 /* 42 * Debug macros 43 */ 44 #define sbridge_printk(level, fmt, arg...) \ 45 edac_printk(level, "sbridge", fmt, ##arg) 46 47 #define sbridge_mc_printk(mci, level, fmt, arg...) \ 48 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg) 49 50 /* 51 * Get a bit field at register value <v>, from bit <lo> to bit <hi> 52 */ 53 #define GET_BITFIELD(v, lo, hi) \ 54 (((v) & GENMASK_ULL(hi, lo)) >> (lo)) 55 56 /* Devices 12 Function 6, Offsets 0x80 to 0xcc */ 57 static const u32 sbridge_dram_rule[] = { 58 0x80, 0x88, 0x90, 0x98, 0xa0, 59 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, 60 }; 61 62 static const u32 ibridge_dram_rule[] = { 63 0x60, 0x68, 0x70, 0x78, 0x80, 64 0x88, 0x90, 0x98, 0xa0, 0xa8, 65 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, 66 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, 67 }; 68 69 static const u32 knl_dram_rule[] = { 70 0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */ 71 0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */ 72 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */ 73 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */ 74 0x100, 0x108, 0x110, 0x118, /* 20-23 */ 75 }; 76 77 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) 78 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26) 79 80 static char *show_dram_attr(u32 attr) 81 { 82 switch (attr) { 83 case 0: 84 return "DRAM"; 85 case 1: 86 return "MMCFG"; 87 case 2: 88 return "NXM"; 89 default: 90 return "unknown"; 91 } 92 } 93 94 static const u32 sbridge_interleave_list[] = { 95 0x84, 0x8c, 0x94, 0x9c, 0xa4, 96 0xac, 0xb4, 0xbc, 0xc4, 0xcc, 97 }; 98 99 static const u32 ibridge_interleave_list[] = { 100 0x64, 0x6c, 0x74, 0x7c, 0x84, 101 0x8c, 0x94, 0x9c, 0xa4, 0xac, 102 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, 103 0xdc, 0xe4, 0xec, 0xf4, 0xfc, 104 }; 105 106 static const u32 knl_interleave_list[] = { 107 0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */ 108 0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */ 109 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */ 110 0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */ 111 0x104, 0x10c, 0x114, 0x11c, /* 20-23 */ 112 }; 113 114 struct interleave_pkg { 115 unsigned char start; 116 unsigned char end; 117 }; 118 119 static const struct interleave_pkg sbridge_interleave_pkg[] = { 120 { 0, 2 }, 121 { 3, 5 }, 122 { 8, 10 }, 123 { 11, 13 }, 124 { 16, 18 }, 125 { 19, 21 }, 126 { 24, 26 }, 127 { 27, 29 }, 128 }; 129 130 static const struct interleave_pkg ibridge_interleave_pkg[] = { 131 { 0, 3 }, 132 { 4, 7 }, 133 { 8, 11 }, 134 { 12, 15 }, 135 { 16, 19 }, 136 { 20, 23 }, 137 { 24, 27 }, 138 { 28, 31 }, 139 }; 140 141 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, 142 int interleave) 143 { 144 return GET_BITFIELD(reg, table[interleave].start, 145 table[interleave].end); 146 } 147 148 /* Devices 12 Function 7 */ 149 150 #define TOLM 0x80 151 #define TOHM 0x84 152 #define HASWELL_TOLM 0xd0 153 #define HASWELL_TOHM_0 0xd4 154 #define HASWELL_TOHM_1 0xd8 155 #define KNL_TOLM 0xd0 156 #define KNL_TOHM_0 0xd4 157 #define KNL_TOHM_1 0xd8 158 159 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) 160 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) 161 162 /* Device 13 Function 6 */ 163 164 #define SAD_TARGET 0xf0 165 166 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11) 167 168 #define SOURCE_ID_KNL(reg) GET_BITFIELD(reg, 12, 14) 169 170 #define SAD_CONTROL 0xf4 171 172 /* Device 14 function 0 */ 173 174 static const u32 tad_dram_rule[] = { 175 0x40, 0x44, 0x48, 0x4c, 176 0x50, 0x54, 0x58, 0x5c, 177 0x60, 0x64, 0x68, 0x6c, 178 }; 179 #define MAX_TAD ARRAY_SIZE(tad_dram_rule) 180 181 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff) 182 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11) 183 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9) 184 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7) 185 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5) 186 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3) 187 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1) 188 189 /* Device 15, function 0 */ 190 191 #define MCMTR 0x7c 192 #define KNL_MCMTR 0x624 193 194 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2) 195 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1) 196 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0) 197 198 /* Device 15, function 1 */ 199 200 #define RASENABLES 0xac 201 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0) 202 203 /* Device 15, functions 2-5 */ 204 205 static const int mtr_regs[] = { 206 0x80, 0x84, 0x88, 207 }; 208 209 static const int knl_mtr_reg = 0xb60; 210 211 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19) 212 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14) 213 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13) 214 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4) 215 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1) 216 217 static const u32 tad_ch_nilv_offset[] = { 218 0x90, 0x94, 0x98, 0x9c, 219 0xa0, 0xa4, 0xa8, 0xac, 220 0xb0, 0xb4, 0xb8, 0xbc, 221 }; 222 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29) 223 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26) 224 225 static const u32 rir_way_limit[] = { 226 0x108, 0x10c, 0x110, 0x114, 0x118, 227 }; 228 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit) 229 230 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31) 231 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29) 232 233 #define MAX_RIR_WAY 8 234 235 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { 236 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c }, 237 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c }, 238 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c }, 239 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c }, 240 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, 241 }; 242 243 #define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \ 244 GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19)) 245 246 #define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \ 247 GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14)) 248 249 /* Device 16, functions 2-7 */ 250 251 /* 252 * FIXME: Implement the error count reads directly 253 */ 254 255 static const u32 correrrcnt[] = { 256 0x104, 0x108, 0x10c, 0x110, 257 }; 258 259 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) 260 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) 261 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) 262 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) 263 264 static const u32 correrrthrsld[] = { 265 0x11c, 0x120, 0x124, 0x128, 266 }; 267 268 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) 269 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) 270 271 272 /* Device 17, function 0 */ 273 274 #define SB_RANK_CFG_A 0x0328 275 276 #define IB_RANK_CFG_A 0x0320 277 278 /* 279 * sbridge structs 280 */ 281 282 #define NUM_CHANNELS 8 /* 2MC per socket, four chan per MC */ 283 #define MAX_DIMMS 3 /* Max DIMMS per channel */ 284 #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ 285 #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ 286 #define KNL_MAX_EDCS 8 /* Embedded DRAM controllers */ 287 #define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */ 288 289 enum type { 290 SANDY_BRIDGE, 291 IVY_BRIDGE, 292 HASWELL, 293 BROADWELL, 294 KNIGHTS_LANDING, 295 }; 296 297 struct sbridge_pvt; 298 struct sbridge_info { 299 enum type type; 300 u32 mcmtr; 301 u32 rankcfgr; 302 u64 (*get_tolm)(struct sbridge_pvt *pvt); 303 u64 (*get_tohm)(struct sbridge_pvt *pvt); 304 u64 (*rir_limit)(u32 reg); 305 u64 (*sad_limit)(u32 reg); 306 u32 (*interleave_mode)(u32 reg); 307 u32 (*dram_attr)(u32 reg); 308 const u32 *dram_rule; 309 const u32 *interleave_list; 310 const struct interleave_pkg *interleave_pkg; 311 u8 max_sad; 312 u8 max_interleave; 313 u8 (*get_node_id)(struct sbridge_pvt *pvt); 314 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt); 315 enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr); 316 struct pci_dev *pci_vtd; 317 }; 318 319 struct sbridge_channel { 320 u32 ranks; 321 u32 dimms; 322 }; 323 324 struct pci_id_descr { 325 int dev_id; 326 int optional; 327 }; 328 329 struct pci_id_table { 330 const struct pci_id_descr *descr; 331 int n_devs; 332 enum type type; 333 }; 334 335 struct sbridge_dev { 336 struct list_head list; 337 u8 bus, mc; 338 u8 node_id, source_id; 339 struct pci_dev **pdev; 340 int n_devs; 341 struct mem_ctl_info *mci; 342 }; 343 344 struct knl_pvt { 345 struct pci_dev *pci_cha[KNL_MAX_CHAS]; 346 struct pci_dev *pci_channel[KNL_MAX_CHANNELS]; 347 struct pci_dev *pci_mc0; 348 struct pci_dev *pci_mc1; 349 struct pci_dev *pci_mc0_misc; 350 struct pci_dev *pci_mc1_misc; 351 struct pci_dev *pci_mc_info; /* tolm, tohm */ 352 }; 353 354 struct sbridge_pvt { 355 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; 356 struct pci_dev *pci_sad0, *pci_sad1; 357 struct pci_dev *pci_ha0, *pci_ha1; 358 struct pci_dev *pci_br0, *pci_br1; 359 struct pci_dev *pci_ha1_ta; 360 struct pci_dev *pci_tad[NUM_CHANNELS]; 361 362 struct sbridge_dev *sbridge_dev; 363 364 struct sbridge_info info; 365 struct sbridge_channel channel[NUM_CHANNELS]; 366 367 /* Memory type detection */ 368 bool is_mirrored, is_lockstep, is_close_pg; 369 bool is_chan_hash; 370 371 /* Memory description */ 372 u64 tolm, tohm; 373 struct knl_pvt knl; 374 }; 375 376 #define PCI_DESCR(device_id, opt) \ 377 .dev_id = (device_id), \ 378 .optional = opt 379 380 static const struct pci_id_descr pci_dev_descr_sbridge[] = { 381 /* Processor Home Agent */ 382 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) }, 383 384 /* Memory controller */ 385 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) }, 386 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) }, 387 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) }, 388 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) }, 389 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) }, 390 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) }, 391 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) }, 392 393 /* System Address Decoder */ 394 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) }, 395 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) }, 396 397 /* Broadcast Registers */ 398 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, 399 }; 400 401 #define PCI_ID_TABLE_ENTRY(A, T) { \ 402 .descr = A, \ 403 .n_devs = ARRAY_SIZE(A), \ 404 .type = T \ 405 } 406 407 static const struct pci_id_table pci_dev_descr_sbridge_table[] = { 408 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE), 409 {0,} /* 0 terminated list. */ 410 }; 411 412 /* This changes depending if 1HA or 2HA: 413 * 1HA: 414 * 0x0eb8 (17.0) is DDRIO0 415 * 2HA: 416 * 0x0ebc (17.4) is DDRIO0 417 */ 418 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8 419 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc 420 421 /* pci ids */ 422 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0 423 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8 424 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71 425 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa 426 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab 427 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac 428 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead 429 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8 430 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9 431 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca 432 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60 433 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68 434 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79 435 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a 436 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b 437 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2 0x0e6c 438 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3 0x0e6d 439 440 static const struct pci_id_descr pci_dev_descr_ibridge[] = { 441 /* Processor Home Agent */ 442 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, 443 444 /* Memory controller */ 445 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, 446 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, 447 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, 448 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, 449 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, 450 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, 451 452 /* System Address Decoder */ 453 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, 454 455 /* Broadcast Registers */ 456 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, 457 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, 458 459 /* Optional, mode 2HA */ 460 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, 461 #if 0 462 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, 463 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, 464 #endif 465 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, 466 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, 467 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1) }, 468 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1) }, 469 470 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, 471 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, 472 }; 473 474 static const struct pci_id_table pci_dev_descr_ibridge_table[] = { 475 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE), 476 {0,} /* 0 terminated list. */ 477 }; 478 479 /* Haswell support */ 480 /* EN processor: 481 * - 1 IMC 482 * - 3 DDR3 channels, 2 DPC per channel 483 * EP processor: 484 * - 1 or 2 IMC 485 * - 4 DDR4 channels, 3 DPC per channel 486 * EP 4S processor: 487 * - 2 IMC 488 * - 4 DDR4 channels, 3 DPC per channel 489 * EX processor: 490 * - 2 IMC 491 * - each IMC interfaces with a SMI 2 channel 492 * - each SMI channel interfaces with a scalable memory buffer 493 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC 494 */ 495 #define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */ 496 #define HASWELL_HASYSDEFEATURE2 0x84 497 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28 498 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 499 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60 500 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8 501 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71 502 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68 503 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79 504 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc 505 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd 506 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa 507 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab 508 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac 509 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad 510 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a 511 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b 512 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c 513 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d 514 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd 515 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf 516 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9 517 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb 518 static const struct pci_id_descr pci_dev_descr_haswell[] = { 519 /* first item must be the HA */ 520 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, 521 522 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) }, 523 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) }, 524 525 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) }, 526 527 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) }, 528 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) }, 529 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) }, 530 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) }, 531 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) }, 532 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, 533 534 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, 535 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) }, 536 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) }, 537 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) }, 538 539 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, 540 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, 541 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) }, 542 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) }, 543 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) }, 544 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) }, 545 }; 546 547 static const struct pci_id_table pci_dev_descr_haswell_table[] = { 548 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL), 549 {0,} /* 0 terminated list. */ 550 }; 551 552 /* Knight's Landing Support */ 553 /* 554 * KNL's memory channels are swizzled between memory controllers. 555 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2 556 */ 557 #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3) 558 559 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ 560 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 561 /* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */ 562 #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL 0x7843 563 /* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */ 564 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844 565 /* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */ 566 #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0 0x782a 567 /* SAD target - 1-29-1 (1 of these) */ 568 #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1 0x782b 569 /* Caching / Home Agent */ 570 #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA 0x782c 571 /* Device with TOLM and TOHM, 0-5-0 (1 of these) */ 572 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM 0x7810 573 574 /* 575 * KNL differs from SB, IB, and Haswell in that it has multiple 576 * instances of the same device with the same device ID, so we handle that 577 * by creating as many copies in the table as we expect to find. 578 * (Like device ID must be grouped together.) 579 */ 580 581 static const struct pci_id_descr pci_dev_descr_knl[] = { 582 [0] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) }, 583 [1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) }, 584 [2 ... 3] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)}, 585 [4 ... 41] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) }, 586 [42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) }, 587 [48] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) }, 588 [49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) }, 589 }; 590 591 static const struct pci_id_table pci_dev_descr_knl_table[] = { 592 PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING), 593 {0,} 594 }; 595 596 /* 597 * Broadwell support 598 * 599 * DE processor: 600 * - 1 IMC 601 * - 2 DDR3 channels, 2 DPC per channel 602 * EP processor: 603 * - 1 or 2 IMC 604 * - 4 DDR4 channels, 3 DPC per channel 605 * EP 4S processor: 606 * - 2 IMC 607 * - 4 DDR4 channels, 3 DPC per channel 608 * EX processor: 609 * - 2 IMC 610 * - each IMC interfaces with a SMI 2 channel 611 * - each SMI channel interfaces with a scalable memory buffer 612 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC 613 */ 614 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28 615 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0 616 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60 617 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8 618 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71 619 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68 620 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL 0x6f79 621 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc 622 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd 623 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa 624 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab 625 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac 626 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad 627 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a 628 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b 629 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c 630 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d 631 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf 632 633 static const struct pci_id_descr pci_dev_descr_broadwell[] = { 634 /* first item must be the HA */ 635 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) }, 636 637 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) }, 638 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) }, 639 640 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1) }, 641 642 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) }, 643 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) }, 644 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) }, 645 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) }, 646 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1) }, 647 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1) }, 648 649 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) }, 650 651 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1) }, 652 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1) }, 653 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1) }, 654 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1) }, 655 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1) }, 656 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1) }, 657 }; 658 659 static const struct pci_id_table pci_dev_descr_broadwell_table[] = { 660 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL), 661 {0,} /* 0 terminated list. */ 662 }; 663 664 665 /**************************************************************************** 666 Ancillary status routines 667 ****************************************************************************/ 668 669 static inline int numrank(enum type type, u32 mtr) 670 { 671 int ranks = (1 << RANK_CNT_BITS(mtr)); 672 int max = 4; 673 674 if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING) 675 max = 8; 676 677 if (ranks > max) { 678 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n", 679 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr); 680 return -EINVAL; 681 } 682 683 return ranks; 684 } 685 686 static inline int numrow(u32 mtr) 687 { 688 int rows = (RANK_WIDTH_BITS(mtr) + 12); 689 690 if (rows < 13 || rows > 18) { 691 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n", 692 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); 693 return -EINVAL; 694 } 695 696 return 1 << rows; 697 } 698 699 static inline int numcol(u32 mtr) 700 { 701 int cols = (COL_WIDTH_BITS(mtr) + 10); 702 703 if (cols > 12) { 704 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n", 705 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); 706 return -EINVAL; 707 } 708 709 return 1 << cols; 710 } 711 712 static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus) 713 { 714 struct sbridge_dev *sbridge_dev; 715 716 /* 717 * If we have devices scattered across several busses that pertain 718 * to the same memory controller, we'll lump them all together. 719 */ 720 if (multi_bus) { 721 return list_first_entry_or_null(&sbridge_edac_list, 722 struct sbridge_dev, list); 723 } 724 725 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 726 if (sbridge_dev->bus == bus) 727 return sbridge_dev; 728 } 729 730 return NULL; 731 } 732 733 static struct sbridge_dev *alloc_sbridge_dev(u8 bus, 734 const struct pci_id_table *table) 735 { 736 struct sbridge_dev *sbridge_dev; 737 738 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL); 739 if (!sbridge_dev) 740 return NULL; 741 742 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs, 743 GFP_KERNEL); 744 if (!sbridge_dev->pdev) { 745 kfree(sbridge_dev); 746 return NULL; 747 } 748 749 sbridge_dev->bus = bus; 750 sbridge_dev->n_devs = table->n_devs; 751 list_add_tail(&sbridge_dev->list, &sbridge_edac_list); 752 753 return sbridge_dev; 754 } 755 756 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev) 757 { 758 list_del(&sbridge_dev->list); 759 kfree(sbridge_dev->pdev); 760 kfree(sbridge_dev); 761 } 762 763 static u64 sbridge_get_tolm(struct sbridge_pvt *pvt) 764 { 765 u32 reg; 766 767 /* Address range is 32:28 */ 768 pci_read_config_dword(pvt->pci_sad1, TOLM, ®); 769 return GET_TOLM(reg); 770 } 771 772 static u64 sbridge_get_tohm(struct sbridge_pvt *pvt) 773 { 774 u32 reg; 775 776 pci_read_config_dword(pvt->pci_sad1, TOHM, ®); 777 return GET_TOHM(reg); 778 } 779 780 static u64 ibridge_get_tolm(struct sbridge_pvt *pvt) 781 { 782 u32 reg; 783 784 pci_read_config_dword(pvt->pci_br1, TOLM, ®); 785 786 return GET_TOLM(reg); 787 } 788 789 static u64 ibridge_get_tohm(struct sbridge_pvt *pvt) 790 { 791 u32 reg; 792 793 pci_read_config_dword(pvt->pci_br1, TOHM, ®); 794 795 return GET_TOHM(reg); 796 } 797 798 static u64 rir_limit(u32 reg) 799 { 800 return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff; 801 } 802 803 static u64 sad_limit(u32 reg) 804 { 805 return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff; 806 } 807 808 static u32 interleave_mode(u32 reg) 809 { 810 return GET_BITFIELD(reg, 1, 1); 811 } 812 813 static u32 dram_attr(u32 reg) 814 { 815 return GET_BITFIELD(reg, 2, 3); 816 } 817 818 static u64 knl_sad_limit(u32 reg) 819 { 820 return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff; 821 } 822 823 static u32 knl_interleave_mode(u32 reg) 824 { 825 return GET_BITFIELD(reg, 1, 2); 826 } 827 828 static const char * const knl_intlv_mode[] = { 829 "[8:6]", "[10:8]", "[14:12]", "[32:30]" 830 }; 831 832 static const char *get_intlv_mode_str(u32 reg, enum type t) 833 { 834 if (t == KNIGHTS_LANDING) 835 return knl_intlv_mode[knl_interleave_mode(reg)]; 836 else 837 return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]"; 838 } 839 840 static u32 dram_attr_knl(u32 reg) 841 { 842 return GET_BITFIELD(reg, 3, 4); 843 } 844 845 846 static enum mem_type get_memory_type(struct sbridge_pvt *pvt) 847 { 848 u32 reg; 849 enum mem_type mtype; 850 851 if (pvt->pci_ddrio) { 852 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, 853 ®); 854 if (GET_BITFIELD(reg, 11, 11)) 855 /* FIXME: Can also be LRDIMM */ 856 mtype = MEM_RDDR3; 857 else 858 mtype = MEM_DDR3; 859 } else 860 mtype = MEM_UNKNOWN; 861 862 return mtype; 863 } 864 865 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt) 866 { 867 u32 reg; 868 bool registered = false; 869 enum mem_type mtype = MEM_UNKNOWN; 870 871 if (!pvt->pci_ddrio) 872 goto out; 873 874 pci_read_config_dword(pvt->pci_ddrio, 875 HASWELL_DDRCRCLKCONTROLS, ®); 876 /* Is_Rdimm */ 877 if (GET_BITFIELD(reg, 16, 16)) 878 registered = true; 879 880 pci_read_config_dword(pvt->pci_ta, MCMTR, ®); 881 if (GET_BITFIELD(reg, 14, 14)) { 882 if (registered) 883 mtype = MEM_RDDR4; 884 else 885 mtype = MEM_DDR4; 886 } else { 887 if (registered) 888 mtype = MEM_RDDR3; 889 else 890 mtype = MEM_DDR3; 891 } 892 893 out: 894 return mtype; 895 } 896 897 static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr) 898 { 899 /* for KNL value is fixed */ 900 return DEV_X16; 901 } 902 903 static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr) 904 { 905 /* there's no way to figure out */ 906 return DEV_UNKNOWN; 907 } 908 909 static enum dev_type __ibridge_get_width(u32 mtr) 910 { 911 enum dev_type type; 912 913 switch (mtr) { 914 case 3: 915 type = DEV_UNKNOWN; 916 break; 917 case 2: 918 type = DEV_X16; 919 break; 920 case 1: 921 type = DEV_X8; 922 break; 923 case 0: 924 type = DEV_X4; 925 break; 926 } 927 928 return type; 929 } 930 931 static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr) 932 { 933 /* 934 * ddr3_width on the documentation but also valid for DDR4 on 935 * Haswell 936 */ 937 return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8)); 938 } 939 940 static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr) 941 { 942 /* ddr3_width on the documentation but also valid for DDR4 */ 943 return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9)); 944 } 945 946 static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt) 947 { 948 /* DDR4 RDIMMS and LRDIMMS are supported */ 949 return MEM_RDDR4; 950 } 951 952 static u8 get_node_id(struct sbridge_pvt *pvt) 953 { 954 u32 reg; 955 pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®); 956 return GET_BITFIELD(reg, 0, 2); 957 } 958 959 static u8 haswell_get_node_id(struct sbridge_pvt *pvt) 960 { 961 u32 reg; 962 963 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®); 964 return GET_BITFIELD(reg, 0, 3); 965 } 966 967 static u8 knl_get_node_id(struct sbridge_pvt *pvt) 968 { 969 u32 reg; 970 971 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®); 972 return GET_BITFIELD(reg, 0, 2); 973 } 974 975 976 static u64 haswell_get_tolm(struct sbridge_pvt *pvt) 977 { 978 u32 reg; 979 980 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, ®); 981 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff; 982 } 983 984 static u64 haswell_get_tohm(struct sbridge_pvt *pvt) 985 { 986 u64 rc; 987 u32 reg; 988 989 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, ®); 990 rc = GET_BITFIELD(reg, 26, 31); 991 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, ®); 992 rc = ((reg << 6) | rc) << 26; 993 994 return rc | 0x1ffffff; 995 } 996 997 static u64 knl_get_tolm(struct sbridge_pvt *pvt) 998 { 999 u32 reg; 1000 1001 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, ®); 1002 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff; 1003 } 1004 1005 static u64 knl_get_tohm(struct sbridge_pvt *pvt) 1006 { 1007 u64 rc; 1008 u32 reg_lo, reg_hi; 1009 1010 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, ®_lo); 1011 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, ®_hi); 1012 rc = ((u64)reg_hi << 32) | reg_lo; 1013 return rc | 0x3ffffff; 1014 } 1015 1016 1017 static u64 haswell_rir_limit(u32 reg) 1018 { 1019 return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1; 1020 } 1021 1022 static inline u8 sad_pkg_socket(u8 pkg) 1023 { 1024 /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */ 1025 return ((pkg >> 3) << 2) | (pkg & 0x3); 1026 } 1027 1028 static inline u8 sad_pkg_ha(u8 pkg) 1029 { 1030 return (pkg >> 2) & 0x1; 1031 } 1032 1033 static int haswell_chan_hash(int idx, u64 addr) 1034 { 1035 int i; 1036 1037 /* 1038 * XOR even bits from 12:26 to bit0 of idx, 1039 * odd bits from 13:27 to bit1 1040 */ 1041 for (i = 12; i < 28; i += 2) 1042 idx ^= (addr >> i) & 3; 1043 1044 return idx; 1045 } 1046 1047 /**************************************************************************** 1048 Memory check routines 1049 ****************************************************************************/ 1050 static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id) 1051 { 1052 struct pci_dev *pdev = NULL; 1053 1054 do { 1055 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev); 1056 if (pdev && pdev->bus->number == bus) 1057 break; 1058 } while (pdev); 1059 1060 return pdev; 1061 } 1062 1063 /** 1064 * check_if_ecc_is_active() - Checks if ECC is active 1065 * @bus: Device bus 1066 * @type: Memory controller type 1067 * returns: 0 in case ECC is active, -ENODEV if it can't be determined or 1068 * disabled 1069 */ 1070 static int check_if_ecc_is_active(const u8 bus, enum type type) 1071 { 1072 struct pci_dev *pdev = NULL; 1073 u32 mcmtr, id; 1074 1075 switch (type) { 1076 case IVY_BRIDGE: 1077 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA; 1078 break; 1079 case HASWELL: 1080 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA; 1081 break; 1082 case SANDY_BRIDGE: 1083 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA; 1084 break; 1085 case BROADWELL: 1086 id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA; 1087 break; 1088 case KNIGHTS_LANDING: 1089 /* 1090 * KNL doesn't group things by bus the same way 1091 * SB/IB/Haswell does. 1092 */ 1093 id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA; 1094 break; 1095 default: 1096 return -ENODEV; 1097 } 1098 1099 if (type != KNIGHTS_LANDING) 1100 pdev = get_pdev_same_bus(bus, id); 1101 else 1102 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, 0); 1103 1104 if (!pdev) { 1105 sbridge_printk(KERN_ERR, "Couldn't find PCI device " 1106 "%04x:%04x! on bus %02d\n", 1107 PCI_VENDOR_ID_INTEL, id, bus); 1108 return -ENODEV; 1109 } 1110 1111 pci_read_config_dword(pdev, 1112 type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr); 1113 if (!IS_ECC_ENABLED(mcmtr)) { 1114 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); 1115 return -ENODEV; 1116 } 1117 return 0; 1118 } 1119 1120 /* Low bits of TAD limit, and some metadata. */ 1121 static const u32 knl_tad_dram_limit_lo[] = { 1122 0x400, 0x500, 0x600, 0x700, 1123 0x800, 0x900, 0xa00, 0xb00, 1124 }; 1125 1126 /* Low bits of TAD offset. */ 1127 static const u32 knl_tad_dram_offset_lo[] = { 1128 0x404, 0x504, 0x604, 0x704, 1129 0x804, 0x904, 0xa04, 0xb04, 1130 }; 1131 1132 /* High 16 bits of TAD limit and offset. */ 1133 static const u32 knl_tad_dram_hi[] = { 1134 0x408, 0x508, 0x608, 0x708, 1135 0x808, 0x908, 0xa08, 0xb08, 1136 }; 1137 1138 /* Number of ways a tad entry is interleaved. */ 1139 static const u32 knl_tad_ways[] = { 1140 8, 6, 4, 3, 2, 1, 1141 }; 1142 1143 /* 1144 * Retrieve the n'th Target Address Decode table entry 1145 * from the memory controller's TAD table. 1146 * 1147 * @pvt: driver private data 1148 * @entry: which entry you want to retrieve 1149 * @mc: which memory controller (0 or 1) 1150 * @offset: output tad range offset 1151 * @limit: output address of first byte above tad range 1152 * @ways: output number of interleave ways 1153 * 1154 * The offset value has curious semantics. It's a sort of running total 1155 * of the sizes of all the memory regions that aren't mapped in this 1156 * tad table. 1157 */ 1158 static int knl_get_tad(const struct sbridge_pvt *pvt, 1159 const int entry, 1160 const int mc, 1161 u64 *offset, 1162 u64 *limit, 1163 int *ways) 1164 { 1165 u32 reg_limit_lo, reg_offset_lo, reg_hi; 1166 struct pci_dev *pci_mc; 1167 int way_id; 1168 1169 switch (mc) { 1170 case 0: 1171 pci_mc = pvt->knl.pci_mc0; 1172 break; 1173 case 1: 1174 pci_mc = pvt->knl.pci_mc1; 1175 break; 1176 default: 1177 WARN_ON(1); 1178 return -EINVAL; 1179 } 1180 1181 pci_read_config_dword(pci_mc, 1182 knl_tad_dram_limit_lo[entry], ®_limit_lo); 1183 pci_read_config_dword(pci_mc, 1184 knl_tad_dram_offset_lo[entry], ®_offset_lo); 1185 pci_read_config_dword(pci_mc, 1186 knl_tad_dram_hi[entry], ®_hi); 1187 1188 /* Is this TAD entry enabled? */ 1189 if (!GET_BITFIELD(reg_limit_lo, 0, 0)) 1190 return -ENODEV; 1191 1192 way_id = GET_BITFIELD(reg_limit_lo, 3, 5); 1193 1194 if (way_id < ARRAY_SIZE(knl_tad_ways)) { 1195 *ways = knl_tad_ways[way_id]; 1196 } else { 1197 *ways = 0; 1198 sbridge_printk(KERN_ERR, 1199 "Unexpected value %d in mc_tad_limit_lo wayness field\n", 1200 way_id); 1201 return -ENODEV; 1202 } 1203 1204 /* 1205 * The least significant 6 bits of base and limit are truncated. 1206 * For limit, we fill the missing bits with 1s. 1207 */ 1208 *offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) | 1209 ((u64) GET_BITFIELD(reg_hi, 0, 15) << 32); 1210 *limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 | 1211 ((u64) GET_BITFIELD(reg_hi, 16, 31) << 32); 1212 1213 return 0; 1214 } 1215 1216 /* Determine which memory controller is responsible for a given channel. */ 1217 static int knl_channel_mc(int channel) 1218 { 1219 WARN_ON(channel < 0 || channel >= 6); 1220 1221 return channel < 3 ? 1 : 0; 1222 } 1223 1224 /* 1225 * Get the Nth entry from EDC_ROUTE_TABLE register. 1226 * (This is the per-tile mapping of logical interleave targets to 1227 * physical EDC modules.) 1228 * 1229 * entry 0: 0:2 1230 * 1: 3:5 1231 * 2: 6:8 1232 * 3: 9:11 1233 * 4: 12:14 1234 * 5: 15:17 1235 * 6: 18:20 1236 * 7: 21:23 1237 * reserved: 24:31 1238 */ 1239 static u32 knl_get_edc_route(int entry, u32 reg) 1240 { 1241 WARN_ON(entry >= KNL_MAX_EDCS); 1242 return GET_BITFIELD(reg, entry*3, (entry*3)+2); 1243 } 1244 1245 /* 1246 * Get the Nth entry from MC_ROUTE_TABLE register. 1247 * (This is the per-tile mapping of logical interleave targets to 1248 * physical DRAM channels modules.) 1249 * 1250 * entry 0: mc 0:2 channel 18:19 1251 * 1: mc 3:5 channel 20:21 1252 * 2: mc 6:8 channel 22:23 1253 * 3: mc 9:11 channel 24:25 1254 * 4: mc 12:14 channel 26:27 1255 * 5: mc 15:17 channel 28:29 1256 * reserved: 30:31 1257 * 1258 * Though we have 3 bits to identify the MC, we should only see 1259 * the values 0 or 1. 1260 */ 1261 1262 static u32 knl_get_mc_route(int entry, u32 reg) 1263 { 1264 int mc, chan; 1265 1266 WARN_ON(entry >= KNL_MAX_CHANNELS); 1267 1268 mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); 1269 chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); 1270 1271 return knl_channel_remap(mc, chan); 1272 } 1273 1274 /* 1275 * Render the EDC_ROUTE register in human-readable form. 1276 * Output string s should be at least KNL_MAX_EDCS*2 bytes. 1277 */ 1278 static void knl_show_edc_route(u32 reg, char *s) 1279 { 1280 int i; 1281 1282 for (i = 0; i < KNL_MAX_EDCS; i++) { 1283 s[i*2] = knl_get_edc_route(i, reg) + '0'; 1284 s[i*2+1] = '-'; 1285 } 1286 1287 s[KNL_MAX_EDCS*2 - 1] = '\0'; 1288 } 1289 1290 /* 1291 * Render the MC_ROUTE register in human-readable form. 1292 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes. 1293 */ 1294 static void knl_show_mc_route(u32 reg, char *s) 1295 { 1296 int i; 1297 1298 for (i = 0; i < KNL_MAX_CHANNELS; i++) { 1299 s[i*2] = knl_get_mc_route(i, reg) + '0'; 1300 s[i*2+1] = '-'; 1301 } 1302 1303 s[KNL_MAX_CHANNELS*2 - 1] = '\0'; 1304 } 1305 1306 #define KNL_EDC_ROUTE 0xb8 1307 #define KNL_MC_ROUTE 0xb4 1308 1309 /* Is this dram rule backed by regular DRAM in flat mode? */ 1310 #define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29) 1311 1312 /* Is this dram rule cached? */ 1313 #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28) 1314 1315 /* Is this rule backed by edc ? */ 1316 #define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29) 1317 1318 /* Is this rule backed by DRAM, cacheable in EDRAM? */ 1319 #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28) 1320 1321 /* Is this rule mod3? */ 1322 #define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27) 1323 1324 /* 1325 * Figure out how big our RAM modules are. 1326 * 1327 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we 1328 * have to figure this out from the SAD rules, interleave lists, route tables, 1329 * and TAD rules. 1330 * 1331 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to 1332 * inspect the TAD rules to figure out how large the SAD regions really are. 1333 * 1334 * When we know the real size of a SAD region and how many ways it's 1335 * interleaved, we know the individual contribution of each channel to 1336 * TAD is size/ways. 1337 * 1338 * Finally, we have to check whether each channel participates in each SAD 1339 * region. 1340 * 1341 * Fortunately, KNL only supports one DIMM per channel, so once we know how 1342 * much memory the channel uses, we know the DIMM is at least that large. 1343 * (The BIOS might possibly choose not to map all available memory, in which 1344 * case we will underreport the size of the DIMM.) 1345 * 1346 * In theory, we could try to determine the EDC sizes as well, but that would 1347 * only work in flat mode, not in cache mode. 1348 * 1349 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS 1350 * elements) 1351 */ 1352 static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) 1353 { 1354 u64 sad_base, sad_size, sad_limit = 0; 1355 u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace; 1356 int sad_rule = 0; 1357 int tad_rule = 0; 1358 int intrlv_ways, tad_ways; 1359 u32 first_pkg, pkg; 1360 int i; 1361 u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */ 1362 u32 dram_rule, interleave_reg; 1363 u32 mc_route_reg[KNL_MAX_CHAS]; 1364 u32 edc_route_reg[KNL_MAX_CHAS]; 1365 int edram_only; 1366 char edc_route_string[KNL_MAX_EDCS*2]; 1367 char mc_route_string[KNL_MAX_CHANNELS*2]; 1368 int cur_reg_start; 1369 int mc; 1370 int channel; 1371 int way; 1372 int participants[KNL_MAX_CHANNELS]; 1373 int participant_count = 0; 1374 1375 for (i = 0; i < KNL_MAX_CHANNELS; i++) 1376 mc_sizes[i] = 0; 1377 1378 /* Read the EDC route table in each CHA. */ 1379 cur_reg_start = 0; 1380 for (i = 0; i < KNL_MAX_CHAS; i++) { 1381 pci_read_config_dword(pvt->knl.pci_cha[i], 1382 KNL_EDC_ROUTE, &edc_route_reg[i]); 1383 1384 if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) { 1385 knl_show_edc_route(edc_route_reg[i-1], 1386 edc_route_string); 1387 if (cur_reg_start == i-1) 1388 edac_dbg(0, "edc route table for CHA %d: %s\n", 1389 cur_reg_start, edc_route_string); 1390 else 1391 edac_dbg(0, "edc route table for CHA %d-%d: %s\n", 1392 cur_reg_start, i-1, edc_route_string); 1393 cur_reg_start = i; 1394 } 1395 } 1396 knl_show_edc_route(edc_route_reg[i-1], edc_route_string); 1397 if (cur_reg_start == i-1) 1398 edac_dbg(0, "edc route table for CHA %d: %s\n", 1399 cur_reg_start, edc_route_string); 1400 else 1401 edac_dbg(0, "edc route table for CHA %d-%d: %s\n", 1402 cur_reg_start, i-1, edc_route_string); 1403 1404 /* Read the MC route table in each CHA. */ 1405 cur_reg_start = 0; 1406 for (i = 0; i < KNL_MAX_CHAS; i++) { 1407 pci_read_config_dword(pvt->knl.pci_cha[i], 1408 KNL_MC_ROUTE, &mc_route_reg[i]); 1409 1410 if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) { 1411 knl_show_mc_route(mc_route_reg[i-1], mc_route_string); 1412 if (cur_reg_start == i-1) 1413 edac_dbg(0, "mc route table for CHA %d: %s\n", 1414 cur_reg_start, mc_route_string); 1415 else 1416 edac_dbg(0, "mc route table for CHA %d-%d: %s\n", 1417 cur_reg_start, i-1, mc_route_string); 1418 cur_reg_start = i; 1419 } 1420 } 1421 knl_show_mc_route(mc_route_reg[i-1], mc_route_string); 1422 if (cur_reg_start == i-1) 1423 edac_dbg(0, "mc route table for CHA %d: %s\n", 1424 cur_reg_start, mc_route_string); 1425 else 1426 edac_dbg(0, "mc route table for CHA %d-%d: %s\n", 1427 cur_reg_start, i-1, mc_route_string); 1428 1429 /* Process DRAM rules */ 1430 for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) { 1431 /* previous limit becomes the new base */ 1432 sad_base = sad_limit; 1433 1434 pci_read_config_dword(pvt->pci_sad0, 1435 pvt->info.dram_rule[sad_rule], &dram_rule); 1436 1437 if (!DRAM_RULE_ENABLE(dram_rule)) 1438 break; 1439 1440 edram_only = KNL_EDRAM_ONLY(dram_rule); 1441 1442 sad_limit = pvt->info.sad_limit(dram_rule)+1; 1443 sad_size = sad_limit - sad_base; 1444 1445 pci_read_config_dword(pvt->pci_sad0, 1446 pvt->info.interleave_list[sad_rule], &interleave_reg); 1447 1448 /* 1449 * Find out how many ways this dram rule is interleaved. 1450 * We stop when we see the first channel again. 1451 */ 1452 first_pkg = sad_pkg(pvt->info.interleave_pkg, 1453 interleave_reg, 0); 1454 for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) { 1455 pkg = sad_pkg(pvt->info.interleave_pkg, 1456 interleave_reg, intrlv_ways); 1457 1458 if ((pkg & 0x8) == 0) { 1459 /* 1460 * 0 bit means memory is non-local, 1461 * which KNL doesn't support 1462 */ 1463 edac_dbg(0, "Unexpected interleave target %d\n", 1464 pkg); 1465 return -1; 1466 } 1467 1468 if (pkg == first_pkg) 1469 break; 1470 } 1471 if (KNL_MOD3(dram_rule)) 1472 intrlv_ways *= 3; 1473 1474 edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n", 1475 sad_rule, 1476 sad_base, 1477 sad_limit, 1478 intrlv_ways, 1479 edram_only ? ", EDRAM" : ""); 1480 1481 /* 1482 * Find out how big the SAD region really is by iterating 1483 * over TAD tables (SAD regions may contain holes). 1484 * Each memory controller might have a different TAD table, so 1485 * we have to look at both. 1486 * 1487 * Livespace is the memory that's mapped in this TAD table, 1488 * deadspace is the holes (this could be the MMIO hole, or it 1489 * could be memory that's mapped by the other TAD table but 1490 * not this one). 1491 */ 1492 for (mc = 0; mc < 2; mc++) { 1493 sad_actual_size[mc] = 0; 1494 tad_livespace = 0; 1495 for (tad_rule = 0; 1496 tad_rule < ARRAY_SIZE( 1497 knl_tad_dram_limit_lo); 1498 tad_rule++) { 1499 if (knl_get_tad(pvt, 1500 tad_rule, 1501 mc, 1502 &tad_deadspace, 1503 &tad_limit, 1504 &tad_ways)) 1505 break; 1506 1507 tad_size = (tad_limit+1) - 1508 (tad_livespace + tad_deadspace); 1509 tad_livespace += tad_size; 1510 tad_base = (tad_limit+1) - tad_size; 1511 1512 if (tad_base < sad_base) { 1513 if (tad_limit > sad_base) 1514 edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n"); 1515 } else if (tad_base < sad_limit) { 1516 if (tad_limit+1 > sad_limit) { 1517 edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n"); 1518 } else { 1519 /* TAD region is completely inside SAD region */ 1520 edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n", 1521 tad_rule, tad_base, 1522 tad_limit, tad_size, 1523 mc); 1524 sad_actual_size[mc] += tad_size; 1525 } 1526 } 1527 tad_base = tad_limit+1; 1528 } 1529 } 1530 1531 for (mc = 0; mc < 2; mc++) { 1532 edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n", 1533 mc, sad_actual_size[mc], sad_actual_size[mc]); 1534 } 1535 1536 /* Ignore EDRAM rule */ 1537 if (edram_only) 1538 continue; 1539 1540 /* Figure out which channels participate in interleave. */ 1541 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) 1542 participants[channel] = 0; 1543 1544 /* For each channel, does at least one CHA have 1545 * this channel mapped to the given target? 1546 */ 1547 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { 1548 for (way = 0; way < intrlv_ways; way++) { 1549 int target; 1550 int cha; 1551 1552 if (KNL_MOD3(dram_rule)) 1553 target = way; 1554 else 1555 target = 0x7 & sad_pkg( 1556 pvt->info.interleave_pkg, interleave_reg, way); 1557 1558 for (cha = 0; cha < KNL_MAX_CHAS; cha++) { 1559 if (knl_get_mc_route(target, 1560 mc_route_reg[cha]) == channel 1561 && !participants[channel]) { 1562 participant_count++; 1563 participants[channel] = 1; 1564 break; 1565 } 1566 } 1567 } 1568 } 1569 1570 if (participant_count != intrlv_ways) 1571 edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n", 1572 participant_count, intrlv_ways); 1573 1574 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { 1575 mc = knl_channel_mc(channel); 1576 if (participants[channel]) { 1577 edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n", 1578 channel, 1579 sad_actual_size[mc]/intrlv_ways, 1580 sad_rule); 1581 mc_sizes[channel] += 1582 sad_actual_size[mc]/intrlv_ways; 1583 } 1584 } 1585 } 1586 1587 return 0; 1588 } 1589 1590 static int get_dimm_config(struct mem_ctl_info *mci) 1591 { 1592 struct sbridge_pvt *pvt = mci->pvt_info; 1593 struct dimm_info *dimm; 1594 unsigned i, j, banks, ranks, rows, cols, npages; 1595 u64 size; 1596 u32 reg; 1597 enum edac_type mode; 1598 enum mem_type mtype; 1599 int channels = pvt->info.type == KNIGHTS_LANDING ? 1600 KNL_MAX_CHANNELS : NUM_CHANNELS; 1601 u64 knl_mc_sizes[KNL_MAX_CHANNELS]; 1602 1603 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { 1604 pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, ®); 1605 pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21); 1606 } 1607 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || 1608 pvt->info.type == KNIGHTS_LANDING) 1609 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®); 1610 else 1611 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); 1612 1613 if (pvt->info.type == KNIGHTS_LANDING) 1614 pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg); 1615 else 1616 pvt->sbridge_dev->source_id = SOURCE_ID(reg); 1617 1618 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); 1619 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", 1620 pvt->sbridge_dev->mc, 1621 pvt->sbridge_dev->node_id, 1622 pvt->sbridge_dev->source_id); 1623 1624 /* KNL doesn't support mirroring or lockstep, 1625 * and is always closed page 1626 */ 1627 if (pvt->info.type == KNIGHTS_LANDING) { 1628 mode = EDAC_S4ECD4ED; 1629 pvt->is_mirrored = false; 1630 1631 if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0) 1632 return -1; 1633 } else { 1634 pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); 1635 if (IS_MIRROR_ENABLED(reg)) { 1636 edac_dbg(0, "Memory mirror is enabled\n"); 1637 pvt->is_mirrored = true; 1638 } else { 1639 edac_dbg(0, "Memory mirror is disabled\n"); 1640 pvt->is_mirrored = false; 1641 } 1642 1643 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); 1644 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { 1645 edac_dbg(0, "Lockstep is enabled\n"); 1646 mode = EDAC_S8ECD8ED; 1647 pvt->is_lockstep = true; 1648 } else { 1649 edac_dbg(0, "Lockstep is disabled\n"); 1650 mode = EDAC_S4ECD4ED; 1651 pvt->is_lockstep = false; 1652 } 1653 if (IS_CLOSE_PG(pvt->info.mcmtr)) { 1654 edac_dbg(0, "address map is on closed page mode\n"); 1655 pvt->is_close_pg = true; 1656 } else { 1657 edac_dbg(0, "address map is on open page mode\n"); 1658 pvt->is_close_pg = false; 1659 } 1660 } 1661 1662 mtype = pvt->info.get_memory_type(pvt); 1663 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4) 1664 edac_dbg(0, "Memory is registered\n"); 1665 else if (mtype == MEM_UNKNOWN) 1666 edac_dbg(0, "Cannot determine memory type\n"); 1667 else 1668 edac_dbg(0, "Memory is unregistered\n"); 1669 1670 if (mtype == MEM_DDR4 || mtype == MEM_RDDR4) 1671 banks = 16; 1672 else 1673 banks = 8; 1674 1675 for (i = 0; i < channels; i++) { 1676 u32 mtr; 1677 1678 int max_dimms_per_channel; 1679 1680 if (pvt->info.type == KNIGHTS_LANDING) { 1681 max_dimms_per_channel = 1; 1682 if (!pvt->knl.pci_channel[i]) 1683 continue; 1684 } else { 1685 max_dimms_per_channel = ARRAY_SIZE(mtr_regs); 1686 if (!pvt->pci_tad[i]) 1687 continue; 1688 } 1689 1690 for (j = 0; j < max_dimms_per_channel; j++) { 1691 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 1692 i, j, 0); 1693 if (pvt->info.type == KNIGHTS_LANDING) { 1694 pci_read_config_dword(pvt->knl.pci_channel[i], 1695 knl_mtr_reg, &mtr); 1696 } else { 1697 pci_read_config_dword(pvt->pci_tad[i], 1698 mtr_regs[j], &mtr); 1699 } 1700 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); 1701 if (IS_DIMM_PRESENT(mtr)) { 1702 pvt->channel[i].dimms++; 1703 1704 ranks = numrank(pvt->info.type, mtr); 1705 1706 if (pvt->info.type == KNIGHTS_LANDING) { 1707 /* For DDR4, this is fixed. */ 1708 cols = 1 << 10; 1709 rows = knl_mc_sizes[i] / 1710 ((u64) cols * ranks * banks * 8); 1711 } else { 1712 rows = numrow(mtr); 1713 cols = numcol(mtr); 1714 } 1715 1716 size = ((u64)rows * cols * banks * ranks) >> (20 - 3); 1717 npages = MiB_TO_PAGES(size); 1718 1719 edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 1720 pvt->sbridge_dev->mc, i/4, i%4, j, 1721 size, npages, 1722 banks, ranks, rows, cols); 1723 1724 dimm->nr_pages = npages; 1725 dimm->grain = 32; 1726 dimm->dtype = pvt->info.get_width(pvt, mtr); 1727 dimm->mtype = mtype; 1728 dimm->edac_mode = mode; 1729 snprintf(dimm->label, sizeof(dimm->label), 1730 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u", 1731 pvt->sbridge_dev->source_id, i/4, i%4, j); 1732 } 1733 } 1734 } 1735 1736 return 0; 1737 } 1738 1739 static void get_memory_layout(const struct mem_ctl_info *mci) 1740 { 1741 struct sbridge_pvt *pvt = mci->pvt_info; 1742 int i, j, k, n_sads, n_tads, sad_interl; 1743 u32 reg; 1744 u64 limit, prv = 0; 1745 u64 tmp_mb; 1746 u32 gb, mb; 1747 u32 rir_way; 1748 1749 /* 1750 * Step 1) Get TOLM/TOHM ranges 1751 */ 1752 1753 pvt->tolm = pvt->info.get_tolm(pvt); 1754 tmp_mb = (1 + pvt->tolm) >> 20; 1755 1756 gb = div_u64_rem(tmp_mb, 1024, &mb); 1757 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", 1758 gb, (mb*1000)/1024, (u64)pvt->tolm); 1759 1760 /* Address range is already 45:25 */ 1761 pvt->tohm = pvt->info.get_tohm(pvt); 1762 tmp_mb = (1 + pvt->tohm) >> 20; 1763 1764 gb = div_u64_rem(tmp_mb, 1024, &mb); 1765 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", 1766 gb, (mb*1000)/1024, (u64)pvt->tohm); 1767 1768 /* 1769 * Step 2) Get SAD range and SAD Interleave list 1770 * TAD registers contain the interleave wayness. However, it 1771 * seems simpler to just discover it indirectly, with the 1772 * algorithm bellow. 1773 */ 1774 prv = 0; 1775 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { 1776 /* SAD_LIMIT Address range is 45:26 */ 1777 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], 1778 ®); 1779 limit = pvt->info.sad_limit(reg); 1780 1781 if (!DRAM_RULE_ENABLE(reg)) 1782 continue; 1783 1784 if (limit <= prv) 1785 break; 1786 1787 tmp_mb = (limit + 1) >> 20; 1788 gb = div_u64_rem(tmp_mb, 1024, &mb); 1789 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", 1790 n_sads, 1791 show_dram_attr(pvt->info.dram_attr(reg)), 1792 gb, (mb*1000)/1024, 1793 ((u64)tmp_mb) << 20L, 1794 get_intlv_mode_str(reg, pvt->info.type), 1795 reg); 1796 prv = limit; 1797 1798 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], 1799 ®); 1800 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); 1801 for (j = 0; j < 8; j++) { 1802 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j); 1803 if (j > 0 && sad_interl == pkg) 1804 break; 1805 1806 edac_dbg(0, "SAD#%d, interleave #%d: %d\n", 1807 n_sads, j, pkg); 1808 } 1809 } 1810 1811 if (pvt->info.type == KNIGHTS_LANDING) 1812 return; 1813 1814 /* 1815 * Step 3) Get TAD range 1816 */ 1817 prv = 0; 1818 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 1819 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], 1820 ®); 1821 limit = TAD_LIMIT(reg); 1822 if (limit <= prv) 1823 break; 1824 tmp_mb = (limit + 1) >> 20; 1825 1826 gb = div_u64_rem(tmp_mb, 1024, &mb); 1827 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", 1828 n_tads, gb, (mb*1000)/1024, 1829 ((u64)tmp_mb) << 20L, 1830 (u32)(1 << TAD_SOCK(reg)), 1831 (u32)TAD_CH(reg) + 1, 1832 (u32)TAD_TGT0(reg), 1833 (u32)TAD_TGT1(reg), 1834 (u32)TAD_TGT2(reg), 1835 (u32)TAD_TGT3(reg), 1836 reg); 1837 prv = limit; 1838 } 1839 1840 /* 1841 * Step 4) Get TAD offsets, per each channel 1842 */ 1843 for (i = 0; i < NUM_CHANNELS; i++) { 1844 if (!pvt->channel[i].dimms) 1845 continue; 1846 for (j = 0; j < n_tads; j++) { 1847 pci_read_config_dword(pvt->pci_tad[i], 1848 tad_ch_nilv_offset[j], 1849 ®); 1850 tmp_mb = TAD_OFFSET(reg) >> 20; 1851 gb = div_u64_rem(tmp_mb, 1024, &mb); 1852 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", 1853 i, j, 1854 gb, (mb*1000)/1024, 1855 ((u64)tmp_mb) << 20L, 1856 reg); 1857 } 1858 } 1859 1860 /* 1861 * Step 6) Get RIR Wayness/Limit, per each channel 1862 */ 1863 for (i = 0; i < NUM_CHANNELS; i++) { 1864 if (!pvt->channel[i].dimms) 1865 continue; 1866 for (j = 0; j < MAX_RIR_RANGES; j++) { 1867 pci_read_config_dword(pvt->pci_tad[i], 1868 rir_way_limit[j], 1869 ®); 1870 1871 if (!IS_RIR_VALID(reg)) 1872 continue; 1873 1874 tmp_mb = pvt->info.rir_limit(reg) >> 20; 1875 rir_way = 1 << RIR_WAY(reg); 1876 gb = div_u64_rem(tmp_mb, 1024, &mb); 1877 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", 1878 i, j, 1879 gb, (mb*1000)/1024, 1880 ((u64)tmp_mb) << 20L, 1881 rir_way, 1882 reg); 1883 1884 for (k = 0; k < rir_way; k++) { 1885 pci_read_config_dword(pvt->pci_tad[i], 1886 rir_offset[j][k], 1887 ®); 1888 tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6; 1889 1890 gb = div_u64_rem(tmp_mb, 1024, &mb); 1891 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 1892 i, j, k, 1893 gb, (mb*1000)/1024, 1894 ((u64)tmp_mb) << 20L, 1895 (u32)RIR_RNK_TGT(pvt->info.type, reg), 1896 reg); 1897 } 1898 } 1899 } 1900 } 1901 1902 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id) 1903 { 1904 struct sbridge_dev *sbridge_dev; 1905 1906 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 1907 if (sbridge_dev->node_id == node_id) 1908 return sbridge_dev->mci; 1909 } 1910 return NULL; 1911 } 1912 1913 static int get_memory_error_data(struct mem_ctl_info *mci, 1914 u64 addr, 1915 u8 *socket, u8 *ha, 1916 long *channel_mask, 1917 u8 *rank, 1918 char **area_type, char *msg) 1919 { 1920 struct mem_ctl_info *new_mci; 1921 struct sbridge_pvt *pvt = mci->pvt_info; 1922 struct pci_dev *pci_ha; 1923 int n_rir, n_sads, n_tads, sad_way, sck_xch; 1924 int sad_interl, idx, base_ch; 1925 int interleave_mode, shiftup = 0; 1926 unsigned sad_interleave[pvt->info.max_interleave]; 1927 u32 reg, dram_rule; 1928 u8 ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0; 1929 u32 tad_offset; 1930 u32 rir_way; 1931 u32 mb, gb; 1932 u64 ch_addr, offset, limit = 0, prv = 0; 1933 1934 1935 /* 1936 * Step 0) Check if the address is at special memory ranges 1937 * The check bellow is probably enough to fill all cases where 1938 * the error is not inside a memory, except for the legacy 1939 * range (e. g. VGA addresses). It is unlikely, however, that the 1940 * memory controller would generate an error on that range. 1941 */ 1942 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) { 1943 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); 1944 return -EINVAL; 1945 } 1946 if (addr >= (u64)pvt->tohm) { 1947 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); 1948 return -EINVAL; 1949 } 1950 1951 /* 1952 * Step 1) Get socket 1953 */ 1954 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { 1955 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], 1956 ®); 1957 1958 if (!DRAM_RULE_ENABLE(reg)) 1959 continue; 1960 1961 limit = pvt->info.sad_limit(reg); 1962 if (limit <= prv) { 1963 sprintf(msg, "Can't discover the memory socket"); 1964 return -EINVAL; 1965 } 1966 if (addr <= limit) 1967 break; 1968 prv = limit; 1969 } 1970 if (n_sads == pvt->info.max_sad) { 1971 sprintf(msg, "Can't discover the memory socket"); 1972 return -EINVAL; 1973 } 1974 dram_rule = reg; 1975 *area_type = show_dram_attr(pvt->info.dram_attr(dram_rule)); 1976 interleave_mode = pvt->info.interleave_mode(dram_rule); 1977 1978 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], 1979 ®); 1980 1981 if (pvt->info.type == SANDY_BRIDGE) { 1982 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); 1983 for (sad_way = 0; sad_way < 8; sad_way++) { 1984 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way); 1985 if (sad_way > 0 && sad_interl == pkg) 1986 break; 1987 sad_interleave[sad_way] = pkg; 1988 edac_dbg(0, "SAD interleave #%d: %d\n", 1989 sad_way, sad_interleave[sad_way]); 1990 } 1991 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", 1992 pvt->sbridge_dev->mc, 1993 n_sads, 1994 addr, 1995 limit, 1996 sad_way + 7, 1997 !interleave_mode ? "" : "XOR[18:16]"); 1998 if (interleave_mode) 1999 idx = ((addr >> 6) ^ (addr >> 16)) & 7; 2000 else 2001 idx = (addr >> 6) & 7; 2002 switch (sad_way) { 2003 case 1: 2004 idx = 0; 2005 break; 2006 case 2: 2007 idx = idx & 1; 2008 break; 2009 case 4: 2010 idx = idx & 3; 2011 break; 2012 case 8: 2013 break; 2014 default: 2015 sprintf(msg, "Can't discover socket interleave"); 2016 return -EINVAL; 2017 } 2018 *socket = sad_interleave[idx]; 2019 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", 2020 idx, sad_way, *socket); 2021 } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { 2022 int bits, a7mode = A7MODE(dram_rule); 2023 2024 if (a7mode) { 2025 /* A7 mode swaps P9 with P6 */ 2026 bits = GET_BITFIELD(addr, 7, 8) << 1; 2027 bits |= GET_BITFIELD(addr, 9, 9); 2028 } else 2029 bits = GET_BITFIELD(addr, 6, 8); 2030 2031 if (interleave_mode == 0) { 2032 /* interleave mode will XOR {8,7,6} with {18,17,16} */ 2033 idx = GET_BITFIELD(addr, 16, 18); 2034 idx ^= bits; 2035 } else 2036 idx = bits; 2037 2038 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); 2039 *socket = sad_pkg_socket(pkg); 2040 sad_ha = sad_pkg_ha(pkg); 2041 if (sad_ha) 2042 ch_add = 4; 2043 2044 if (a7mode) { 2045 /* MCChanShiftUpEnable */ 2046 pci_read_config_dword(pvt->pci_ha0, 2047 HASWELL_HASYSDEFEATURE2, ®); 2048 shiftup = GET_BITFIELD(reg, 22, 22); 2049 } 2050 2051 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n", 2052 idx, *socket, sad_ha, shiftup); 2053 } else { 2054 /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */ 2055 idx = (addr >> 6) & 7; 2056 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); 2057 *socket = sad_pkg_socket(pkg); 2058 sad_ha = sad_pkg_ha(pkg); 2059 if (sad_ha) 2060 ch_add = 4; 2061 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", 2062 idx, *socket, sad_ha); 2063 } 2064 2065 *ha = sad_ha; 2066 2067 /* 2068 * Move to the proper node structure, in order to access the 2069 * right PCI registers 2070 */ 2071 new_mci = get_mci_for_node_id(*socket); 2072 if (!new_mci) { 2073 sprintf(msg, "Struct for socket #%u wasn't initialized", 2074 *socket); 2075 return -EINVAL; 2076 } 2077 mci = new_mci; 2078 pvt = mci->pvt_info; 2079 2080 /* 2081 * Step 2) Get memory channel 2082 */ 2083 prv = 0; 2084 if (pvt->info.type == SANDY_BRIDGE) 2085 pci_ha = pvt->pci_ha0; 2086 else { 2087 if (sad_ha) 2088 pci_ha = pvt->pci_ha1; 2089 else 2090 pci_ha = pvt->pci_ha0; 2091 } 2092 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 2093 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); 2094 limit = TAD_LIMIT(reg); 2095 if (limit <= prv) { 2096 sprintf(msg, "Can't discover the memory channel"); 2097 return -EINVAL; 2098 } 2099 if (addr <= limit) 2100 break; 2101 prv = limit; 2102 } 2103 if (n_tads == MAX_TAD) { 2104 sprintf(msg, "Can't discover the memory channel"); 2105 return -EINVAL; 2106 } 2107 2108 ch_way = TAD_CH(reg) + 1; 2109 sck_way = TAD_SOCK(reg); 2110 2111 if (ch_way == 3) 2112 idx = addr >> 6; 2113 else { 2114 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 2115 if (pvt->is_chan_hash) 2116 idx = haswell_chan_hash(idx, addr); 2117 } 2118 idx = idx % ch_way; 2119 2120 /* 2121 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ??? 2122 */ 2123 switch (idx) { 2124 case 0: 2125 base_ch = TAD_TGT0(reg); 2126 break; 2127 case 1: 2128 base_ch = TAD_TGT1(reg); 2129 break; 2130 case 2: 2131 base_ch = TAD_TGT2(reg); 2132 break; 2133 case 3: 2134 base_ch = TAD_TGT3(reg); 2135 break; 2136 default: 2137 sprintf(msg, "Can't discover the TAD target"); 2138 return -EINVAL; 2139 } 2140 *channel_mask = 1 << base_ch; 2141 2142 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], 2143 tad_ch_nilv_offset[n_tads], 2144 &tad_offset); 2145 2146 if (pvt->is_mirrored) { 2147 *channel_mask |= 1 << ((base_ch + 2) % 4); 2148 switch(ch_way) { 2149 case 2: 2150 case 4: 2151 sck_xch = (1 << sck_way) * (ch_way >> 1); 2152 break; 2153 default: 2154 sprintf(msg, "Invalid mirror set. Can't decode addr"); 2155 return -EINVAL; 2156 } 2157 } else 2158 sck_xch = (1 << sck_way) * ch_way; 2159 2160 if (pvt->is_lockstep) 2161 *channel_mask |= 1 << ((base_ch + 1) % 4); 2162 2163 offset = TAD_OFFSET(tad_offset); 2164 2165 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", 2166 n_tads, 2167 addr, 2168 limit, 2169 sck_way, 2170 ch_way, 2171 offset, 2172 idx, 2173 base_ch, 2174 *channel_mask); 2175 2176 /* Calculate channel address */ 2177 /* Remove the TAD offset */ 2178 2179 if (offset > addr) { 2180 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!", 2181 offset, addr); 2182 return -EINVAL; 2183 } 2184 2185 ch_addr = addr - offset; 2186 ch_addr >>= (6 + shiftup); 2187 ch_addr /= sck_xch; 2188 ch_addr <<= (6 + shiftup); 2189 ch_addr |= addr & ((1 << (6 + shiftup)) - 1); 2190 2191 /* 2192 * Step 3) Decode rank 2193 */ 2194 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) { 2195 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], 2196 rir_way_limit[n_rir], 2197 ®); 2198 2199 if (!IS_RIR_VALID(reg)) 2200 continue; 2201 2202 limit = pvt->info.rir_limit(reg); 2203 gb = div_u64_rem(limit >> 20, 1024, &mb); 2204 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", 2205 n_rir, 2206 gb, (mb*1000)/1024, 2207 limit, 2208 1 << RIR_WAY(reg)); 2209 if (ch_addr <= limit) 2210 break; 2211 } 2212 if (n_rir == MAX_RIR_RANGES) { 2213 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx", 2214 ch_addr); 2215 return -EINVAL; 2216 } 2217 rir_way = RIR_WAY(reg); 2218 2219 if (pvt->is_close_pg) 2220 idx = (ch_addr >> 6); 2221 else 2222 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */ 2223 idx %= 1 << rir_way; 2224 2225 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], 2226 rir_offset[n_rir][idx], 2227 ®); 2228 *rank = RIR_RNK_TGT(pvt->info.type, reg); 2229 2230 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", 2231 n_rir, 2232 ch_addr, 2233 limit, 2234 rir_way, 2235 idx); 2236 2237 return 0; 2238 } 2239 2240 /**************************************************************************** 2241 Device initialization routines: put/get, init/exit 2242 ****************************************************************************/ 2243 2244 /* 2245 * sbridge_put_all_devices 'put' all the devices that we have 2246 * reserved via 'get' 2247 */ 2248 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) 2249 { 2250 int i; 2251 2252 edac_dbg(0, "\n"); 2253 for (i = 0; i < sbridge_dev->n_devs; i++) { 2254 struct pci_dev *pdev = sbridge_dev->pdev[i]; 2255 if (!pdev) 2256 continue; 2257 edac_dbg(0, "Removing dev %02x:%02x.%d\n", 2258 pdev->bus->number, 2259 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 2260 pci_dev_put(pdev); 2261 } 2262 } 2263 2264 static void sbridge_put_all_devices(void) 2265 { 2266 struct sbridge_dev *sbridge_dev, *tmp; 2267 2268 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) { 2269 sbridge_put_devices(sbridge_dev); 2270 free_sbridge_dev(sbridge_dev); 2271 } 2272 } 2273 2274 static int sbridge_get_onedevice(struct pci_dev **prev, 2275 u8 *num_mc, 2276 const struct pci_id_table *table, 2277 const unsigned devno, 2278 const int multi_bus) 2279 { 2280 struct sbridge_dev *sbridge_dev; 2281 const struct pci_id_descr *dev_descr = &table->descr[devno]; 2282 struct pci_dev *pdev = NULL; 2283 u8 bus = 0; 2284 2285 sbridge_printk(KERN_DEBUG, 2286 "Seeking for: PCI ID %04x:%04x\n", 2287 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2288 2289 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 2290 dev_descr->dev_id, *prev); 2291 2292 if (!pdev) { 2293 if (*prev) { 2294 *prev = pdev; 2295 return 0; 2296 } 2297 2298 if (dev_descr->optional) 2299 return 0; 2300 2301 /* if the HA wasn't found */ 2302 if (devno == 0) 2303 return -ENODEV; 2304 2305 sbridge_printk(KERN_INFO, 2306 "Device not found: %04x:%04x\n", 2307 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2308 2309 /* End of list, leave */ 2310 return -ENODEV; 2311 } 2312 bus = pdev->bus->number; 2313 2314 sbridge_dev = get_sbridge_dev(bus, multi_bus); 2315 if (!sbridge_dev) { 2316 sbridge_dev = alloc_sbridge_dev(bus, table); 2317 if (!sbridge_dev) { 2318 pci_dev_put(pdev); 2319 return -ENOMEM; 2320 } 2321 (*num_mc)++; 2322 } 2323 2324 if (sbridge_dev->pdev[devno]) { 2325 sbridge_printk(KERN_ERR, 2326 "Duplicated device for %04x:%04x\n", 2327 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2328 pci_dev_put(pdev); 2329 return -ENODEV; 2330 } 2331 2332 sbridge_dev->pdev[devno] = pdev; 2333 2334 /* Be sure that the device is enabled */ 2335 if (unlikely(pci_enable_device(pdev) < 0)) { 2336 sbridge_printk(KERN_ERR, 2337 "Couldn't enable %04x:%04x\n", 2338 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2339 return -ENODEV; 2340 } 2341 2342 edac_dbg(0, "Detected %04x:%04x\n", 2343 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 2344 2345 /* 2346 * As stated on drivers/pci/search.c, the reference count for 2347 * @from is always decremented if it is not %NULL. So, as we need 2348 * to get all devices up to null, we need to do a get for the device 2349 */ 2350 pci_dev_get(pdev); 2351 2352 *prev = pdev; 2353 2354 return 0; 2355 } 2356 2357 /* 2358 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's 2359 * devices we want to reference for this driver. 2360 * @num_mc: pointer to the memory controllers count, to be incremented in case 2361 * of success. 2362 * @table: model specific table 2363 * 2364 * returns 0 in case of success or error code 2365 */ 2366 static int sbridge_get_all_devices(u8 *num_mc, 2367 const struct pci_id_table *table) 2368 { 2369 int i, rc; 2370 struct pci_dev *pdev = NULL; 2371 int allow_dups = 0; 2372 int multi_bus = 0; 2373 2374 if (table->type == KNIGHTS_LANDING) 2375 allow_dups = multi_bus = 1; 2376 while (table && table->descr) { 2377 for (i = 0; i < table->n_devs; i++) { 2378 if (!allow_dups || i == 0 || 2379 table->descr[i].dev_id != 2380 table->descr[i-1].dev_id) { 2381 pdev = NULL; 2382 } 2383 do { 2384 rc = sbridge_get_onedevice(&pdev, num_mc, 2385 table, i, multi_bus); 2386 if (rc < 0) { 2387 if (i == 0) { 2388 i = table->n_devs; 2389 break; 2390 } 2391 sbridge_put_all_devices(); 2392 return -ENODEV; 2393 } 2394 } while (pdev && !allow_dups); 2395 } 2396 table++; 2397 } 2398 2399 return 0; 2400 } 2401 2402 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, 2403 struct sbridge_dev *sbridge_dev) 2404 { 2405 struct sbridge_pvt *pvt = mci->pvt_info; 2406 struct pci_dev *pdev; 2407 u8 saw_chan_mask = 0; 2408 int i; 2409 2410 for (i = 0; i < sbridge_dev->n_devs; i++) { 2411 pdev = sbridge_dev->pdev[i]; 2412 if (!pdev) 2413 continue; 2414 2415 switch (pdev->device) { 2416 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0: 2417 pvt->pci_sad0 = pdev; 2418 break; 2419 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1: 2420 pvt->pci_sad1 = pdev; 2421 break; 2422 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR: 2423 pvt->pci_br0 = pdev; 2424 break; 2425 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: 2426 pvt->pci_ha0 = pdev; 2427 break; 2428 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: 2429 pvt->pci_ta = pdev; 2430 break; 2431 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS: 2432 pvt->pci_ras = pdev; 2433 break; 2434 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0: 2435 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1: 2436 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: 2437 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: 2438 { 2439 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0; 2440 pvt->pci_tad[id] = pdev; 2441 saw_chan_mask |= 1 << id; 2442 } 2443 break; 2444 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO: 2445 pvt->pci_ddrio = pdev; 2446 break; 2447 default: 2448 goto error; 2449 } 2450 2451 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n", 2452 pdev->vendor, pdev->device, 2453 sbridge_dev->bus, 2454 pdev); 2455 } 2456 2457 /* Check if everything were registered */ 2458 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 || 2459 !pvt->pci_ras || !pvt->pci_ta) 2460 goto enodev; 2461 2462 if (saw_chan_mask != 0x0f) 2463 goto enodev; 2464 return 0; 2465 2466 enodev: 2467 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2468 return -ENODEV; 2469 2470 error: 2471 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n", 2472 PCI_VENDOR_ID_INTEL, pdev->device); 2473 return -EINVAL; 2474 } 2475 2476 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, 2477 struct sbridge_dev *sbridge_dev) 2478 { 2479 struct sbridge_pvt *pvt = mci->pvt_info; 2480 struct pci_dev *pdev; 2481 u8 saw_chan_mask = 0; 2482 int i; 2483 2484 for (i = 0; i < sbridge_dev->n_devs; i++) { 2485 pdev = sbridge_dev->pdev[i]; 2486 if (!pdev) 2487 continue; 2488 2489 switch (pdev->device) { 2490 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: 2491 pvt->pci_ha0 = pdev; 2492 break; 2493 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: 2494 pvt->pci_ta = pdev; 2495 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: 2496 pvt->pci_ras = pdev; 2497 break; 2498 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: 2499 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: 2500 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: 2501 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: 2502 { 2503 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0; 2504 pvt->pci_tad[id] = pdev; 2505 saw_chan_mask |= 1 << id; 2506 } 2507 break; 2508 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0: 2509 pvt->pci_ddrio = pdev; 2510 break; 2511 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0: 2512 pvt->pci_ddrio = pdev; 2513 break; 2514 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD: 2515 pvt->pci_sad0 = pdev; 2516 break; 2517 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0: 2518 pvt->pci_br0 = pdev; 2519 break; 2520 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: 2521 pvt->pci_br1 = pdev; 2522 break; 2523 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: 2524 pvt->pci_ha1 = pdev; 2525 break; 2526 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: 2527 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: 2528 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2: 2529 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3: 2530 { 2531 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4; 2532 pvt->pci_tad[id] = pdev; 2533 saw_chan_mask |= 1 << id; 2534 } 2535 break; 2536 default: 2537 goto error; 2538 } 2539 2540 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 2541 sbridge_dev->bus, 2542 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2543 pdev); 2544 } 2545 2546 /* Check if everything were registered */ 2547 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 || 2548 !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta) 2549 goto enodev; 2550 2551 if (saw_chan_mask != 0x0f && /* -EN */ 2552 saw_chan_mask != 0x33 && /* -EP */ 2553 saw_chan_mask != 0xff) /* -EX */ 2554 goto enodev; 2555 return 0; 2556 2557 enodev: 2558 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2559 return -ENODEV; 2560 2561 error: 2562 sbridge_printk(KERN_ERR, 2563 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL, 2564 pdev->device); 2565 return -EINVAL; 2566 } 2567 2568 static int haswell_mci_bind_devs(struct mem_ctl_info *mci, 2569 struct sbridge_dev *sbridge_dev) 2570 { 2571 struct sbridge_pvt *pvt = mci->pvt_info; 2572 struct pci_dev *pdev; 2573 u8 saw_chan_mask = 0; 2574 int i; 2575 2576 /* there's only one device per system; not tied to any bus */ 2577 if (pvt->info.pci_vtd == NULL) 2578 /* result will be checked later */ 2579 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 2580 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC, 2581 NULL); 2582 2583 for (i = 0; i < sbridge_dev->n_devs; i++) { 2584 pdev = sbridge_dev->pdev[i]; 2585 if (!pdev) 2586 continue; 2587 2588 switch (pdev->device) { 2589 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0: 2590 pvt->pci_sad0 = pdev; 2591 break; 2592 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1: 2593 pvt->pci_sad1 = pdev; 2594 break; 2595 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: 2596 pvt->pci_ha0 = pdev; 2597 break; 2598 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: 2599 pvt->pci_ta = pdev; 2600 break; 2601 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL: 2602 pvt->pci_ras = pdev; 2603 break; 2604 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: 2605 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: 2606 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: 2607 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: 2608 { 2609 int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0; 2610 2611 pvt->pci_tad[id] = pdev; 2612 saw_chan_mask |= 1 << id; 2613 } 2614 break; 2615 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: 2616 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: 2617 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2: 2618 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3: 2619 { 2620 int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4; 2621 2622 pvt->pci_tad[id] = pdev; 2623 saw_chan_mask |= 1 << id; 2624 } 2625 break; 2626 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: 2627 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1: 2628 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2: 2629 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3: 2630 if (!pvt->pci_ddrio) 2631 pvt->pci_ddrio = pdev; 2632 break; 2633 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: 2634 pvt->pci_ha1 = pdev; 2635 break; 2636 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: 2637 pvt->pci_ha1_ta = pdev; 2638 break; 2639 default: 2640 break; 2641 } 2642 2643 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 2644 sbridge_dev->bus, 2645 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2646 pdev); 2647 } 2648 2649 /* Check if everything were registered */ 2650 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || 2651 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 2652 goto enodev; 2653 2654 if (saw_chan_mask != 0x0f && /* -EN */ 2655 saw_chan_mask != 0x33 && /* -EP */ 2656 saw_chan_mask != 0xff) /* -EX */ 2657 goto enodev; 2658 return 0; 2659 2660 enodev: 2661 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2662 return -ENODEV; 2663 } 2664 2665 static int broadwell_mci_bind_devs(struct mem_ctl_info *mci, 2666 struct sbridge_dev *sbridge_dev) 2667 { 2668 struct sbridge_pvt *pvt = mci->pvt_info; 2669 struct pci_dev *pdev; 2670 u8 saw_chan_mask = 0; 2671 int i; 2672 2673 /* there's only one device per system; not tied to any bus */ 2674 if (pvt->info.pci_vtd == NULL) 2675 /* result will be checked later */ 2676 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 2677 PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC, 2678 NULL); 2679 2680 for (i = 0; i < sbridge_dev->n_devs; i++) { 2681 pdev = sbridge_dev->pdev[i]; 2682 if (!pdev) 2683 continue; 2684 2685 switch (pdev->device) { 2686 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0: 2687 pvt->pci_sad0 = pdev; 2688 break; 2689 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1: 2690 pvt->pci_sad1 = pdev; 2691 break; 2692 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: 2693 pvt->pci_ha0 = pdev; 2694 break; 2695 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA: 2696 pvt->pci_ta = pdev; 2697 break; 2698 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL: 2699 pvt->pci_ras = pdev; 2700 break; 2701 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0: 2702 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1: 2703 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2: 2704 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3: 2705 { 2706 int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0; 2707 pvt->pci_tad[id] = pdev; 2708 saw_chan_mask |= 1 << id; 2709 } 2710 break; 2711 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0: 2712 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1: 2713 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2: 2714 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3: 2715 { 2716 int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4; 2717 pvt->pci_tad[id] = pdev; 2718 saw_chan_mask |= 1 << id; 2719 } 2720 break; 2721 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0: 2722 pvt->pci_ddrio = pdev; 2723 break; 2724 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1: 2725 pvt->pci_ha1 = pdev; 2726 break; 2727 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA: 2728 pvt->pci_ha1_ta = pdev; 2729 break; 2730 default: 2731 break; 2732 } 2733 2734 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 2735 sbridge_dev->bus, 2736 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2737 pdev); 2738 } 2739 2740 /* Check if everything were registered */ 2741 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || 2742 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 2743 goto enodev; 2744 2745 if (saw_chan_mask != 0x0f && /* -EN */ 2746 saw_chan_mask != 0x33 && /* -EP */ 2747 saw_chan_mask != 0xff) /* -EX */ 2748 goto enodev; 2749 return 0; 2750 2751 enodev: 2752 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2753 return -ENODEV; 2754 } 2755 2756 static int knl_mci_bind_devs(struct mem_ctl_info *mci, 2757 struct sbridge_dev *sbridge_dev) 2758 { 2759 struct sbridge_pvt *pvt = mci->pvt_info; 2760 struct pci_dev *pdev; 2761 int dev, func; 2762 2763 int i; 2764 int devidx; 2765 2766 for (i = 0; i < sbridge_dev->n_devs; i++) { 2767 pdev = sbridge_dev->pdev[i]; 2768 if (!pdev) 2769 continue; 2770 2771 /* Extract PCI device and function. */ 2772 dev = (pdev->devfn >> 3) & 0x1f; 2773 func = pdev->devfn & 0x7; 2774 2775 switch (pdev->device) { 2776 case PCI_DEVICE_ID_INTEL_KNL_IMC_MC: 2777 if (dev == 8) 2778 pvt->knl.pci_mc0 = pdev; 2779 else if (dev == 9) 2780 pvt->knl.pci_mc1 = pdev; 2781 else { 2782 sbridge_printk(KERN_ERR, 2783 "Memory controller in unexpected place! (dev %d, fn %d)\n", 2784 dev, func); 2785 continue; 2786 } 2787 break; 2788 2789 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0: 2790 pvt->pci_sad0 = pdev; 2791 break; 2792 2793 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1: 2794 pvt->pci_sad1 = pdev; 2795 break; 2796 2797 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA: 2798 /* There are one of these per tile, and range from 2799 * 1.14.0 to 1.18.5. 2800 */ 2801 devidx = ((dev-14)*8)+func; 2802 2803 if (devidx < 0 || devidx >= KNL_MAX_CHAS) { 2804 sbridge_printk(KERN_ERR, 2805 "Caching and Home Agent in unexpected place! (dev %d, fn %d)\n", 2806 dev, func); 2807 continue; 2808 } 2809 2810 WARN_ON(pvt->knl.pci_cha[devidx] != NULL); 2811 2812 pvt->knl.pci_cha[devidx] = pdev; 2813 break; 2814 2815 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL: 2816 devidx = -1; 2817 2818 /* 2819 * MC0 channels 0-2 are device 9 function 2-4, 2820 * MC1 channels 3-5 are device 8 function 2-4. 2821 */ 2822 2823 if (dev == 9) 2824 devidx = func-2; 2825 else if (dev == 8) 2826 devidx = 3 + (func-2); 2827 2828 if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) { 2829 sbridge_printk(KERN_ERR, 2830 "DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n", 2831 dev, func); 2832 continue; 2833 } 2834 2835 WARN_ON(pvt->knl.pci_channel[devidx] != NULL); 2836 pvt->knl.pci_channel[devidx] = pdev; 2837 break; 2838 2839 case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM: 2840 pvt->knl.pci_mc_info = pdev; 2841 break; 2842 2843 case PCI_DEVICE_ID_INTEL_KNL_IMC_TA: 2844 pvt->pci_ta = pdev; 2845 break; 2846 2847 default: 2848 sbridge_printk(KERN_ERR, "Unexpected device %d\n", 2849 pdev->device); 2850 break; 2851 } 2852 } 2853 2854 if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 || 2855 !pvt->pci_sad0 || !pvt->pci_sad1 || 2856 !pvt->pci_ta) { 2857 goto enodev; 2858 } 2859 2860 for (i = 0; i < KNL_MAX_CHANNELS; i++) { 2861 if (!pvt->knl.pci_channel[i]) { 2862 sbridge_printk(KERN_ERR, "Missing channel %d\n", i); 2863 goto enodev; 2864 } 2865 } 2866 2867 for (i = 0; i < KNL_MAX_CHAS; i++) { 2868 if (!pvt->knl.pci_cha[i]) { 2869 sbridge_printk(KERN_ERR, "Missing CHA %d\n", i); 2870 goto enodev; 2871 } 2872 } 2873 2874 return 0; 2875 2876 enodev: 2877 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 2878 return -ENODEV; 2879 } 2880 2881 /**************************************************************************** 2882 Error check routines 2883 ****************************************************************************/ 2884 2885 /* 2886 * While Sandy Bridge has error count registers, SMI BIOS read values from 2887 * and resets the counters. So, they are not reliable for the OS to read 2888 * from them. So, we have no option but to just trust on whatever MCE is 2889 * telling us about the errors. 2890 */ 2891 static void sbridge_mce_output_error(struct mem_ctl_info *mci, 2892 const struct mce *m) 2893 { 2894 struct mem_ctl_info *new_mci; 2895 struct sbridge_pvt *pvt = mci->pvt_info; 2896 enum hw_event_mc_err_type tp_event; 2897 char *type, *optype, msg[256]; 2898 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 2899 bool overflow = GET_BITFIELD(m->status, 62, 62); 2900 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 2901 bool recoverable; 2902 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); 2903 u32 mscod = GET_BITFIELD(m->status, 16, 31); 2904 u32 errcode = GET_BITFIELD(m->status, 0, 15); 2905 u32 channel = GET_BITFIELD(m->status, 0, 3); 2906 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 2907 long channel_mask, first_channel; 2908 u8 rank, socket, ha; 2909 int rc, dimm; 2910 char *area_type = NULL; 2911 2912 if (pvt->info.type != SANDY_BRIDGE) 2913 recoverable = true; 2914 else 2915 recoverable = GET_BITFIELD(m->status, 56, 56); 2916 2917 if (uncorrected_error) { 2918 if (ripv) { 2919 type = "FATAL"; 2920 tp_event = HW_EVENT_ERR_FATAL; 2921 } else { 2922 type = "NON_FATAL"; 2923 tp_event = HW_EVENT_ERR_UNCORRECTED; 2924 } 2925 } else { 2926 type = "CORRECTED"; 2927 tp_event = HW_EVENT_ERR_CORRECTED; 2928 } 2929 2930 /* 2931 * According with Table 15-9 of the Intel Architecture spec vol 3A, 2932 * memory errors should fit in this mask: 2933 * 000f 0000 1mmm cccc (binary) 2934 * where: 2935 * f = Correction Report Filtering Bit. If 1, subsequent errors 2936 * won't be shown 2937 * mmm = error type 2938 * cccc = channel 2939 * If the mask doesn't match, report an error to the parsing logic 2940 */ 2941 if (! ((errcode & 0xef80) == 0x80)) { 2942 optype = "Can't parse: it is not a mem"; 2943 } else { 2944 switch (optypenum) { 2945 case 0: 2946 optype = "generic undef request error"; 2947 break; 2948 case 1: 2949 optype = "memory read error"; 2950 break; 2951 case 2: 2952 optype = "memory write error"; 2953 break; 2954 case 3: 2955 optype = "addr/cmd error"; 2956 break; 2957 case 4: 2958 optype = "memory scrubbing error"; 2959 break; 2960 default: 2961 optype = "reserved"; 2962 break; 2963 } 2964 } 2965 2966 /* Only decode errors with an valid address (ADDRV) */ 2967 if (!GET_BITFIELD(m->status, 58, 58)) 2968 return; 2969 2970 if (pvt->info.type == KNIGHTS_LANDING) { 2971 if (channel == 14) { 2972 edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n", 2973 overflow ? " OVERFLOW" : "", 2974 (uncorrected_error && recoverable) 2975 ? " recoverable" : "", 2976 mscod, errcode, 2977 m->bank); 2978 } else { 2979 char A = *("A"); 2980 2981 /* 2982 * Reported channel is in range 0-2, so we can't map it 2983 * back to mc. To figure out mc we check machine check 2984 * bank register that reported this error. 2985 * bank15 means mc0 and bank16 means mc1. 2986 */ 2987 channel = knl_channel_remap(m->bank == 16, channel); 2988 channel_mask = 1 << channel; 2989 2990 snprintf(msg, sizeof(msg), 2991 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", 2992 overflow ? " OVERFLOW" : "", 2993 (uncorrected_error && recoverable) 2994 ? " recoverable" : " ", 2995 mscod, errcode, channel, A + channel); 2996 edac_mc_handle_error(tp_event, mci, core_err_cnt, 2997 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 2998 channel, 0, -1, 2999 optype, msg); 3000 } 3001 return; 3002 } else { 3003 rc = get_memory_error_data(mci, m->addr, &socket, &ha, 3004 &channel_mask, &rank, &area_type, msg); 3005 } 3006 3007 if (rc < 0) 3008 goto err_parsing; 3009 new_mci = get_mci_for_node_id(socket); 3010 if (!new_mci) { 3011 strcpy(msg, "Error: socket got corrupted!"); 3012 goto err_parsing; 3013 } 3014 mci = new_mci; 3015 pvt = mci->pvt_info; 3016 3017 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS); 3018 3019 if (rank < 4) 3020 dimm = 0; 3021 else if (rank < 8) 3022 dimm = 1; 3023 else 3024 dimm = 2; 3025 3026 3027 /* 3028 * FIXME: On some memory configurations (mirror, lockstep), the 3029 * Memory Controller can't point the error to a single DIMM. The 3030 * EDAC core should be handling the channel mask, in order to point 3031 * to the group of dimm's where the error may be happening. 3032 */ 3033 if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg) 3034 channel = first_channel; 3035 3036 snprintf(msg, sizeof(msg), 3037 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d", 3038 overflow ? " OVERFLOW" : "", 3039 (uncorrected_error && recoverable) ? " recoverable" : "", 3040 area_type, 3041 mscod, errcode, 3042 socket, ha, 3043 channel_mask, 3044 rank); 3045 3046 edac_dbg(0, "%s\n", msg); 3047 3048 /* FIXME: need support for channel mask */ 3049 3050 if (channel == CHANNEL_UNSPECIFIED) 3051 channel = -1; 3052 3053 /* Call the helper to output message */ 3054 edac_mc_handle_error(tp_event, mci, core_err_cnt, 3055 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 3056 4*ha+channel, dimm, -1, 3057 optype, msg); 3058 return; 3059 err_parsing: 3060 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, 3061 -1, -1, -1, 3062 msg, ""); 3063 3064 } 3065 3066 /* 3067 * Check that logging is enabled and that this is the right type 3068 * of error for us to handle. 3069 */ 3070 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, 3071 void *data) 3072 { 3073 struct mce *mce = (struct mce *)data; 3074 struct mem_ctl_info *mci; 3075 struct sbridge_pvt *pvt; 3076 char *type; 3077 3078 if (edac_get_report_status() == EDAC_REPORTING_DISABLED) 3079 return NOTIFY_DONE; 3080 3081 mci = get_mci_for_node_id(mce->socketid); 3082 if (!mci) 3083 return NOTIFY_DONE; 3084 pvt = mci->pvt_info; 3085 3086 /* 3087 * Just let mcelog handle it if the error is 3088 * outside the memory controller. A memory error 3089 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0. 3090 * bit 12 has an special meaning. 3091 */ 3092 if ((mce->status & 0xefff) >> 7 != 1) 3093 return NOTIFY_DONE; 3094 3095 if (mce->mcgstatus & MCG_STATUS_MCIP) 3096 type = "Exception"; 3097 else 3098 type = "Event"; 3099 3100 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); 3101 3102 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx " 3103 "Bank %d: %016Lx\n", mce->extcpu, type, 3104 mce->mcgstatus, mce->bank, mce->status); 3105 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc); 3106 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr); 3107 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc); 3108 3109 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET " 3110 "%u APIC %x\n", mce->cpuvendor, mce->cpuid, 3111 mce->time, mce->socketid, mce->apicid); 3112 3113 sbridge_mce_output_error(mci, mce); 3114 3115 /* Advice mcelog that the error were handled */ 3116 return NOTIFY_STOP; 3117 } 3118 3119 static struct notifier_block sbridge_mce_dec = { 3120 .notifier_call = sbridge_mce_check_error, 3121 .priority = MCE_PRIO_EDAC, 3122 }; 3123 3124 /**************************************************************************** 3125 EDAC register/unregister logic 3126 ****************************************************************************/ 3127 3128 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) 3129 { 3130 struct mem_ctl_info *mci = sbridge_dev->mci; 3131 struct sbridge_pvt *pvt; 3132 3133 if (unlikely(!mci || !mci->pvt_info)) { 3134 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); 3135 3136 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); 3137 return; 3138 } 3139 3140 pvt = mci->pvt_info; 3141 3142 edac_dbg(0, "MC: mci = %p, dev = %p\n", 3143 mci, &sbridge_dev->pdev[0]->dev); 3144 3145 /* Remove MC sysfs nodes */ 3146 edac_mc_del_mc(mci->pdev); 3147 3148 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); 3149 kfree(mci->ctl_name); 3150 edac_mc_free(mci); 3151 sbridge_dev->mci = NULL; 3152 } 3153 3154 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) 3155 { 3156 struct mem_ctl_info *mci; 3157 struct edac_mc_layer layers[2]; 3158 struct sbridge_pvt *pvt; 3159 struct pci_dev *pdev = sbridge_dev->pdev[0]; 3160 int rc; 3161 3162 /* Check the number of active and not disabled channels */ 3163 rc = check_if_ecc_is_active(sbridge_dev->bus, type); 3164 if (unlikely(rc < 0)) 3165 return rc; 3166 3167 /* allocate a new MC control structure */ 3168 layers[0].type = EDAC_MC_LAYER_CHANNEL; 3169 layers[0].size = type == KNIGHTS_LANDING ? 3170 KNL_MAX_CHANNELS : NUM_CHANNELS; 3171 layers[0].is_virt_csrow = false; 3172 layers[1].type = EDAC_MC_LAYER_SLOT; 3173 layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS; 3174 layers[1].is_virt_csrow = true; 3175 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers, 3176 sizeof(*pvt)); 3177 3178 if (unlikely(!mci)) 3179 return -ENOMEM; 3180 3181 edac_dbg(0, "MC: mci = %p, dev = %p\n", 3182 mci, &pdev->dev); 3183 3184 pvt = mci->pvt_info; 3185 memset(pvt, 0, sizeof(*pvt)); 3186 3187 /* Associate sbridge_dev and mci for future usage */ 3188 pvt->sbridge_dev = sbridge_dev; 3189 sbridge_dev->mci = mci; 3190 3191 mci->mtype_cap = type == KNIGHTS_LANDING ? 3192 MEM_FLAG_DDR4 : MEM_FLAG_DDR3; 3193 mci->edac_ctl_cap = EDAC_FLAG_NONE; 3194 mci->edac_cap = EDAC_FLAG_NONE; 3195 mci->mod_name = "sbridge_edac.c"; 3196 mci->mod_ver = SBRIDGE_REVISION; 3197 mci->dev_name = pci_name(pdev); 3198 mci->ctl_page_to_phys = NULL; 3199 3200 pvt->info.type = type; 3201 switch (type) { 3202 case IVY_BRIDGE: 3203 pvt->info.rankcfgr = IB_RANK_CFG_A; 3204 pvt->info.get_tolm = ibridge_get_tolm; 3205 pvt->info.get_tohm = ibridge_get_tohm; 3206 pvt->info.dram_rule = ibridge_dram_rule; 3207 pvt->info.get_memory_type = get_memory_type; 3208 pvt->info.get_node_id = get_node_id; 3209 pvt->info.rir_limit = rir_limit; 3210 pvt->info.sad_limit = sad_limit; 3211 pvt->info.interleave_mode = interleave_mode; 3212 pvt->info.dram_attr = dram_attr; 3213 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 3214 pvt->info.interleave_list = ibridge_interleave_list; 3215 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 3216 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3217 pvt->info.get_width = ibridge_get_width; 3218 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); 3219 3220 /* Store pci devices at mci for faster access */ 3221 rc = ibridge_mci_bind_devs(mci, sbridge_dev); 3222 if (unlikely(rc < 0)) 3223 goto fail0; 3224 break; 3225 case SANDY_BRIDGE: 3226 pvt->info.rankcfgr = SB_RANK_CFG_A; 3227 pvt->info.get_tolm = sbridge_get_tolm; 3228 pvt->info.get_tohm = sbridge_get_tohm; 3229 pvt->info.dram_rule = sbridge_dram_rule; 3230 pvt->info.get_memory_type = get_memory_type; 3231 pvt->info.get_node_id = get_node_id; 3232 pvt->info.rir_limit = rir_limit; 3233 pvt->info.sad_limit = sad_limit; 3234 pvt->info.interleave_mode = interleave_mode; 3235 pvt->info.dram_attr = dram_attr; 3236 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); 3237 pvt->info.interleave_list = sbridge_interleave_list; 3238 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); 3239 pvt->info.interleave_pkg = sbridge_interleave_pkg; 3240 pvt->info.get_width = sbridge_get_width; 3241 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); 3242 3243 /* Store pci devices at mci for faster access */ 3244 rc = sbridge_mci_bind_devs(mci, sbridge_dev); 3245 if (unlikely(rc < 0)) 3246 goto fail0; 3247 break; 3248 case HASWELL: 3249 /* rankcfgr isn't used */ 3250 pvt->info.get_tolm = haswell_get_tolm; 3251 pvt->info.get_tohm = haswell_get_tohm; 3252 pvt->info.dram_rule = ibridge_dram_rule; 3253 pvt->info.get_memory_type = haswell_get_memory_type; 3254 pvt->info.get_node_id = haswell_get_node_id; 3255 pvt->info.rir_limit = haswell_rir_limit; 3256 pvt->info.sad_limit = sad_limit; 3257 pvt->info.interleave_mode = interleave_mode; 3258 pvt->info.dram_attr = dram_attr; 3259 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 3260 pvt->info.interleave_list = ibridge_interleave_list; 3261 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 3262 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3263 pvt->info.get_width = ibridge_get_width; 3264 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); 3265 3266 /* Store pci devices at mci for faster access */ 3267 rc = haswell_mci_bind_devs(mci, sbridge_dev); 3268 if (unlikely(rc < 0)) 3269 goto fail0; 3270 break; 3271 case BROADWELL: 3272 /* rankcfgr isn't used */ 3273 pvt->info.get_tolm = haswell_get_tolm; 3274 pvt->info.get_tohm = haswell_get_tohm; 3275 pvt->info.dram_rule = ibridge_dram_rule; 3276 pvt->info.get_memory_type = haswell_get_memory_type; 3277 pvt->info.get_node_id = haswell_get_node_id; 3278 pvt->info.rir_limit = haswell_rir_limit; 3279 pvt->info.sad_limit = sad_limit; 3280 pvt->info.interleave_mode = interleave_mode; 3281 pvt->info.dram_attr = dram_attr; 3282 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 3283 pvt->info.interleave_list = ibridge_interleave_list; 3284 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 3285 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3286 pvt->info.get_width = broadwell_get_width; 3287 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx); 3288 3289 /* Store pci devices at mci for faster access */ 3290 rc = broadwell_mci_bind_devs(mci, sbridge_dev); 3291 if (unlikely(rc < 0)) 3292 goto fail0; 3293 break; 3294 case KNIGHTS_LANDING: 3295 /* pvt->info.rankcfgr == ??? */ 3296 pvt->info.get_tolm = knl_get_tolm; 3297 pvt->info.get_tohm = knl_get_tohm; 3298 pvt->info.dram_rule = knl_dram_rule; 3299 pvt->info.get_memory_type = knl_get_memory_type; 3300 pvt->info.get_node_id = knl_get_node_id; 3301 pvt->info.rir_limit = NULL; 3302 pvt->info.sad_limit = knl_sad_limit; 3303 pvt->info.interleave_mode = knl_interleave_mode; 3304 pvt->info.dram_attr = dram_attr_knl; 3305 pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule); 3306 pvt->info.interleave_list = knl_interleave_list; 3307 pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list); 3308 pvt->info.interleave_pkg = ibridge_interleave_pkg; 3309 pvt->info.get_width = knl_get_width; 3310 mci->ctl_name = kasprintf(GFP_KERNEL, 3311 "Knights Landing Socket#%d", mci->mc_idx); 3312 3313 rc = knl_mci_bind_devs(mci, sbridge_dev); 3314 if (unlikely(rc < 0)) 3315 goto fail0; 3316 break; 3317 } 3318 3319 /* Get dimm basic config and the memory layout */ 3320 get_dimm_config(mci); 3321 get_memory_layout(mci); 3322 3323 /* record ptr to the generic device */ 3324 mci->pdev = &pdev->dev; 3325 3326 /* add this new MC control structure to EDAC's list of MCs */ 3327 if (unlikely(edac_mc_add_mc(mci))) { 3328 edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 3329 rc = -EINVAL; 3330 goto fail0; 3331 } 3332 3333 return 0; 3334 3335 fail0: 3336 kfree(mci->ctl_name); 3337 edac_mc_free(mci); 3338 sbridge_dev->mci = NULL; 3339 return rc; 3340 } 3341 3342 #define ICPU(model, table) \ 3343 { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table } 3344 3345 static const struct x86_cpu_id sbridge_cpuids[] = { 3346 ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table), 3347 ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table), 3348 ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table), 3349 ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table), 3350 ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table), 3351 ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table), 3352 ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table), 3353 { } 3354 }; 3355 MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids); 3356 3357 /* 3358 * sbridge_probe Get all devices and register memory controllers 3359 * present. 3360 * return: 3361 * 0 for FOUND a device 3362 * < 0 for error code 3363 */ 3364 3365 static int sbridge_probe(const struct x86_cpu_id *id) 3366 { 3367 int rc = -ENODEV; 3368 u8 mc, num_mc = 0; 3369 struct sbridge_dev *sbridge_dev; 3370 struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data; 3371 3372 /* get the pci devices we want to reserve for our use */ 3373 rc = sbridge_get_all_devices(&num_mc, ptable); 3374 3375 if (unlikely(rc < 0)) { 3376 edac_dbg(0, "couldn't get all devices\n"); 3377 goto fail0; 3378 } 3379 3380 mc = 0; 3381 3382 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 3383 edac_dbg(0, "Registering MC#%d (%d of %d)\n", 3384 mc, mc + 1, num_mc); 3385 3386 sbridge_dev->mc = mc++; 3387 rc = sbridge_register_mci(sbridge_dev, ptable->type); 3388 if (unlikely(rc < 0)) 3389 goto fail1; 3390 } 3391 3392 sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION); 3393 3394 return 0; 3395 3396 fail1: 3397 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) 3398 sbridge_unregister_mci(sbridge_dev); 3399 3400 sbridge_put_all_devices(); 3401 fail0: 3402 return rc; 3403 } 3404 3405 /* 3406 * sbridge_remove cleanup 3407 * 3408 */ 3409 static void sbridge_remove(void) 3410 { 3411 struct sbridge_dev *sbridge_dev; 3412 3413 edac_dbg(0, "\n"); 3414 3415 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) 3416 sbridge_unregister_mci(sbridge_dev); 3417 3418 /* Release PCI resources */ 3419 sbridge_put_all_devices(); 3420 } 3421 3422 /* 3423 * sbridge_init Module entry function 3424 * Try to initialize this module for its devices 3425 */ 3426 static int __init sbridge_init(void) 3427 { 3428 const struct x86_cpu_id *id; 3429 int rc; 3430 3431 edac_dbg(2, "\n"); 3432 3433 id = x86_match_cpu(sbridge_cpuids); 3434 if (!id) 3435 return -ENODEV; 3436 3437 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 3438 opstate_init(); 3439 3440 rc = sbridge_probe(id); 3441 3442 if (rc >= 0) { 3443 mce_register_decode_chain(&sbridge_mce_dec); 3444 if (edac_get_report_status() == EDAC_REPORTING_DISABLED) 3445 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n"); 3446 return 0; 3447 } 3448 3449 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 3450 rc); 3451 3452 return rc; 3453 } 3454 3455 /* 3456 * sbridge_exit() Module exit function 3457 * Unregister the driver 3458 */ 3459 static void __exit sbridge_exit(void) 3460 { 3461 edac_dbg(2, "\n"); 3462 sbridge_remove(); 3463 mce_unregister_decode_chain(&sbridge_mce_dec); 3464 } 3465 3466 module_init(sbridge_init); 3467 module_exit(sbridge_exit); 3468 3469 module_param(edac_op_state, int, 0444); 3470 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 3471 3472 MODULE_LICENSE("GPL"); 3473 MODULE_AUTHOR("Mauro Carvalho Chehab"); 3474 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); 3475 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - " 3476 SBRIDGE_REVISION); 3477