/*
 * Intel 3200/3210 Memory Controller kernel module
 * Copyright (C) 2008-2009 Akamai Technologies, Inc.
 * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io.h>
#include "edac_core.h"

#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define I3200_REVISION		"1.1"

#define EDAC_MOD_STR		"i3200_edac"

#define PCI_DEVICE_ID_INTEL_3200_HB	0x29f0

#define I3200_DIMMS		4
#define I3200_RANKS		8
#define I3200_RANKS_PER_CHANNEL	4
#define I3200_CHANNELS		2

/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */

#define I3200_MCHBAR_LOW	0x48	/* MCH Memory Mapped Register BAR */
#define I3200_MCHBAR_HIGH	0x4c
#define I3200_MCHBAR_MASK	0xfffffc000ULL	/* bits 35:14 */
#define I3200_MMR_WINDOW_SIZE	16384

#define I3200_TOM		0xa0	/* Top of Memory (16b)
		 *
		 * 15:10 reserved
		 *  9:0  total populated physical memory
		 */
#define I3200_TOM_MASK		0x3ff	/* bits 9:0 */
#define I3200_TOM_SHIFT		26	/* 64MiB grain */

#define I3200_ERRSTS		0xc8	/* Error Status Register (16b)
		 *
		 * 15    reserved
		 * 14    Isochronous TBWRR Run Behind FIFO Full
		 *       (ITCV)
		 * 13    Isochronous TBWRR Run Behind FIFO Put
		 *       (ITSTV)
		 * 12    reserved
		 * 11    MCH Thermal Sensor Event
		 *       for SMI/SCI/SERR (GTSE)
		 * 10    reserved
		 *  9    LOCK to non-DRAM Memory Flag (LCKF)
		 *  8    reserved
		 *  7    DRAM Throttle Flag (DTF)
		 *  6:2  reserved
		 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
		 *  0    Single-bit DRAM ECC Error Flag (DSERR)
		 */
#define I3200_ERRSTS_UE		0x0002
#define I3200_ERRSTS_CE		0x0001
#define I3200_ERRSTS_BITS	(I3200_ERRSTS_UE | I3200_ERRSTS_CE)


/* Intel MMIO register space - device 0 function 0 - MMR space */

#define I3200_C0DRB	0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
		 *
		 * 15:10 reserved
		 *  9:0  Channel 0 DRAM Rank Boundary Address
		 */
#define I3200_C1DRB	0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
#define I3200_DRB_MASK	0x3ff	/* bits 9:0 */
#define I3200_DRB_SHIFT	26	/* 64MiB grain */

#define I3200_C0ECCERRLOG	0x280	/* Channel 0 ECC Error Log (64b)
		 *
		 * 63:48 Error Column Address (ERRCOL)
		 * 47:32 Error Row Address (ERRROW)
		 * 31:29 Error Bank Address (ERRBANK)
		 * 28:27 Error Rank Address (ERRRANK)
		 * 26:24 reserved
		 * 23:16 Error Syndrome (ERRSYND)
		 * 15: 2 reserved
		 *    1  Multiple Bit Error Status (MERRSTS)
		 *    0  Correctable Error Status (CERRSTS)
		 */
#define I3200_C1ECCERRLOG	0x680	/* Chan 1 ECC Error Log (64b) */
#define I3200_ECCERRLOG_CE	0x1
#define I3200_ECCERRLOG_UE	0x2
#define I3200_ECCERRLOG_RANK_BITS	0x18000000
#define I3200_ECCERRLOG_RANK_SHIFT	27
#define I3200_ECCERRLOG_SYNDROME_BITS	0xff0000
#define I3200_ECCERRLOG_SYNDROME_SHIFT	16
#define I3200_CAPID0	0xe0	/* P.95 of spec for details */

struct i3200_priv {
	void __iomem *window;
};

static int nr_channels;

static int how_many_channels(struct pci_dev *pdev)
{
	unsigned char capid0_8b; /* 8th byte of CAPID0 */

	pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
		edac_dbg(0, "In single channel mode\n");
		return 1;
	} else {
		edac_dbg(0, "In dual channel mode\n");
		return 2;
	}
}

static unsigned long eccerrlog_syndrome(u64 log)
{
	return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
		I3200_ECCERRLOG_SYNDROME_SHIFT;
}

static int eccerrlog_row(int channel, u64 log)
{
	u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
		I3200_ECCERRLOG_RANK_SHIFT);
	return rank | (channel * I3200_RANKS_PER_CHANNEL);
}

enum i3200_chips {
	I3200 = 0,
};

struct i3200_dev_info {
	const char *ctl_name;
};

struct i3200_error_info {
	u16 errsts;
	u16 errsts2;
	u64 eccerrlog[I3200_CHANNELS];
};

static const struct i3200_dev_info i3200_devs[] = {
	[I3200] = {
		.ctl_name = "i3200"
	},
};

static struct pci_dev *mci_pdev;
static int i3200_registered = 1;


static void i3200_clear_error_info(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
		I3200_ERRSTS_BITS);
}

static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
		struct i3200_error_info *info)
{
	struct pci_dev *pdev;
	struct i3200_priv *priv = mci->pvt_info;
	void __iomem *window = priv->window;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
	if (!(info->errsts & I3200_ERRSTS_BITS))
		return;

	info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
	if (nr_channels == 2)
		info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);

	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
		info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
		if (nr_channels == 2)
			info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
	}

	i3200_clear_error_info(mci);
}

static void i3200_process_error_info(struct mem_ctl_info *mci,
		struct i3200_error_info *info)
{
	int channel;
	u64 log;

	if (!(info->errsts & I3200_ERRSTS_BITS))
		return;

	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	for (channel = 0; channel < nr_channels; channel++) {
		log = info->eccerrlog[channel];
		if (log & I3200_ECCERRLOG_UE) {
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     0, 0, 0,
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "i3200 UE", "");
		} else if (log & I3200_ECCERRLOG_CE) {
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     0, 0, eccerrlog_syndrome(log),
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "i3200 CE", "");
		}
	}
}

static void i3200_check(struct mem_ctl_info *mci)
{
	struct i3200_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	i3200_get_and_clear_error_info(mci, &info);
	i3200_process_error_info(mci, &info);
}


static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
{
	union {
		u64 mchbar;
		struct {
			u32 mchbar_low;
			u32 mchbar_high;
		};
	} u;
	void __iomem *window;

	pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
	pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
	u.mchbar &= I3200_MCHBAR_MASK;

	if (u.mchbar != (resource_size_t)u.mchbar) {
		printk(KERN_ERR
			"i3200: mmio space beyond accessible range (0x%llx)\n",
			(unsigned long long)u.mchbar);
		return NULL;
	}

	window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
	if (!window)
		printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
			(unsigned long long)u.mchbar);

	return window;
}


static void i3200_get_drbs(void __iomem *window,
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
	int i;

	for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
		drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
		drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
	}
}

static bool i3200_is_stacked(struct pci_dev *pdev,
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
	u16 tom;

	pci_read_config_word(pdev, I3200_TOM, &tom);
	tom &= I3200_TOM_MASK;

	return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
}

static unsigned long drb_to_nr_pages(
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
	int channel, int rank)
{
	int n;

	n = drbs[channel][rank];
	if (rank > 0)
		n -= drbs[channel][rank - 1];
	if (stacked && (channel == 1) &&
	    drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
		n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];

	n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
	return n;
}

static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i, j;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
	bool stacked;
	void __iomem *window;
	struct i3200_priv *priv;

	edac_dbg(0, "MC:\n");

	window = i3200_map_mchbar(pdev);
	if (!window)
		return -ENODEV;

	i3200_get_drbs(window, drbs);
	nr_channels = how_many_channels(pdev);

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = I3200_DIMMS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct i3200_priv));
	if (!mci)
		return -ENOMEM;

	edac_dbg(3, "MC: init mci\n");

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = I3200_REVISION;
	mci->ctl_name = i3200_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i3200_check;
	mci->ctl_page_to_phys = NULL;
	priv = mci->pvt_info;
	priv->window = window;

	stacked = i3200_is_stacked(pdev, drbs);

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 */
	for (i = 0; i < mci->nr_csrows; i++) {
		unsigned long nr_pages;
		struct csrow_info *csrow = mci->csrows[i];

		nr_pages = drb_to_nr_pages(drbs, stacked,
			i / I3200_RANKS_PER_CHANNEL,
			i % I3200_RANKS_PER_CHANNEL);

		if (nr_pages == 0)
			continue;

		for (j = 0; j < nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages / nr_channels;
			dimm->grain = nr_pages << PAGE_SHIFT;
			dimm->mtype = MEM_DDR2;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = EDAC_UNKNOWN;
		}
	}

	i3200_clear_error_info(mci);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* get this far and it's successful */
	edac_dbg(3, "MC: success\n");
	return 0;

fail:
	iounmap(window);
	if (mci)
		edac_mc_free(mci);

	return rc;
}

static int __devinit i3200_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "MC:\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i3200_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void __devexit i3200_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i3200_priv *priv;

	edac_dbg(0, "\n");

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	priv = mci->pvt_info;
	iounmap(priv->window);

	edac_mc_free(mci);
}

static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = {
	{
		PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		I3200},
	{
		0,
	}	/* 0 terminated list.
	 */
};

MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);

static struct pci_driver i3200_driver = {
	.name = EDAC_MOD_STR,
	.probe = i3200_init_one,
	.remove = __devexit_p(i3200_remove_one),
	.id_table = i3200_pci_tbl,
};

static int __init i3200_init(void)
{
	int pci_rc;

	edac_dbg(3, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i3200_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		i3200_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_3200_HB, NULL);
		if (!mci_pdev) {
			edac_dbg(0, "i3200 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
		if (pci_rc < 0) {
			edac_dbg(0, "i3200 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i3200_driver);

fail0:
	if (mci_pdev)
		pci_dev_put(mci_pdev);

	return pci_rc;
}

static void __exit i3200_exit(void)
{
	edac_dbg(3, "MC:\n");

	pci_unregister_driver(&i3200_driver);
	if (!i3200_registered) {
		i3200_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(i3200_init);
module_exit(i3200_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies, Inc.");
MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");