1 /* 2 * Intel 7300 class Memory Controllers kernel module (Clarksboro) 3 * 4 * This file may be distributed under the terms of the 5 * GNU General Public License version 2 only. 6 * 7 * Copyright (c) 2010 by: 8 * Mauro Carvalho Chehab <mchehab@redhat.com> 9 * 10 * Red Hat Inc. http://www.redhat.com 11 * 12 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet 13 * http://www.intel.com/Assets/PDF/datasheet/318082.pdf 14 * 15 * TODO: The chipset allow checking for PCI Express errors also. Currently, 16 * the driver covers only memory error errors 17 * 18 * This driver uses "csrows" EDAC attribute to represent DIMM slot# 19 */ 20 21 #include <linux/module.h> 22 #include <linux/init.h> 23 #include <linux/pci.h> 24 #include <linux/pci_ids.h> 25 #include <linux/slab.h> 26 #include <linux/edac.h> 27 #include <linux/mmzone.h> 28 29 #include "edac_core.h" 30 31 /* 32 * Alter this version for the I7300 module when modifications are made 33 */ 34 #define I7300_REVISION " Ver: 1.0.0 " __DATE__ 35 36 #define EDAC_MOD_STR "i7300_edac" 37 38 #define i7300_printk(level, fmt, arg...) \ 39 edac_printk(level, "i7300", fmt, ##arg) 40 41 #define i7300_mc_printk(mci, level, fmt, arg...) \ 42 edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg) 43 44 /* 45 * Memory topology is organized as: 46 * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0) 47 * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0) 48 * Each channel can have to 8 DIMM sets (called as SLOTS) 49 * Slots should generally be filled in pairs 50 * Except on Single Channel mode of operation 51 * just slot 0/channel0 filled on this mode 52 * On normal operation mode, the two channels on a branch should be 53 * filled together for the same SLOT# 54 * When in mirrored mode, Branch 1 replicate memory at Branch 0, so, the four 55 * channels on both branches should be filled 56 */ 57 58 /* Limits for i7300 */ 59 #define MAX_SLOTS 8 60 #define MAX_BRANCHES 2 61 #define MAX_CH_PER_BRANCH 2 62 #define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES) 63 #define MAX_MIR 3 64 65 #define to_channel(ch, branch) ((((branch)) << 1) | (ch)) 66 67 #define to_csrow(slot, ch, branch) \ 68 (to_channel(ch, branch) | ((slot) << 2)) 69 70 /* 71 * I7300 devices 72 * All 3 functions of Device 16 (0,1,2) share the SAME DID and 73 * uses PCI_DEVICE_ID_INTEL_I7300_MCH_ERR for device 16 (0,1,2), 74 * PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 and PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 75 * for device 21 (0,1). 76 */ 77 78 /**************************************************** 79 * i7300 Register definitions for memory enumberation 80 ****************************************************/ 81 82 /* 83 * Device 16, 84 * Function 0: System Address (not documented) 85 * Function 1: Memory Branch Map, Control, Errors Register 86 */ 87 88 /* OFFSETS for Function 0 */ 89 #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */ 90 #define MAXCH 0x56 /* Max Channel Number */ 91 #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */ 92 93 /* OFFSETS for Function 1 */ 94 #define MC_SETTINGS 0x40 95 #define IS_MIRRORED(mc) ((mc) & (1 << 16)) 96 #define IS_ECC_ENABLED(mc) ((mc) & (1 << 5)) 97 #define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31)) 98 #define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8)) 99 100 #define MC_SETTINGS_A 0x58 101 #define IS_SINGLE_MODE(mca) ((mca) & (1 << 14)) 102 103 #define TOLM 0x6C 104 #define REDMEMB 0x7C 105 106 #define MIR0 0x80 107 #define MIR1 0x84 108 #define MIR2 0x88 109 110 /* 111 * Note: Other Intel EDAC drivers use AMBPRESENT to identify if the available 112 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it 113 * seems that we cannot use this information directly for the same usage. 114 * Each memory slot may have up to 2 AMB interfaces, one for income and another 115 * for outcome interface to the next slot. 116 * For now, the driver just stores the AMB present registers, but rely only at 117 * the MTR info to detect memory. 118 * Datasheet is also not clear about how to map each AMBPRESENT registers to 119 * one of the 4 available channels. 120 */ 121 #define AMBPRESENT_0 0x64 122 #define AMBPRESENT_1 0x66 123 124 const static u16 mtr_regs [MAX_SLOTS] = { 125 0x80, 0x84, 0x88, 0x8c, 126 0x82, 0x86, 0x8a, 0x8e 127 }; 128 129 /* Defines to extract the vaious fields from the 130 * MTRx - Memory Technology Registers 131 */ 132 #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8)) 133 #define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7)) 134 #define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4) 135 #define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4) 136 #define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0) 137 #define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3) 138 #define MTR_DRAM_BANKS_ADDR_BITS 2 139 #define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13) 140 #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) 141 #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) 142 143 #ifdef CONFIG_EDAC_DEBUG 144 /* MTR NUMROW */ 145 static const char *numrow_toString[] = { 146 "8,192 - 13 rows", 147 "16,384 - 14 rows", 148 "32,768 - 15 rows", 149 "65,536 - 16 rows" 150 }; 151 152 /* MTR NUMCOL */ 153 static const char *numcol_toString[] = { 154 "1,024 - 10 columns", 155 "2,048 - 11 columns", 156 "4,096 - 12 columns", 157 "reserved" 158 }; 159 #endif 160 161 /************************************************ 162 * i7300 Register definitions for error detection 163 ************************************************/ 164 165 /* 166 * Device 16.1: FBD Error Registers 167 */ 168 #define FERR_FAT_FBD 0x98 169 static const char *ferr_fat_fbd_name[] = { 170 [22] = "Non-Redundant Fast Reset Timeout", 171 [2] = ">Tmid Thermal event with intelligent throttling disabled", 172 [1] = "Memory or FBD configuration CRC read error", 173 [0] = "Memory Write error on non-redundant retry or " 174 "FBD configuration Write error on retry", 175 }; 176 #define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28)) 177 #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)) 178 179 #define FERR_NF_FBD 0xa0 180 static const char *ferr_nf_fbd_name[] = { 181 [24] = "DIMM-Spare Copy Completed", 182 [23] = "DIMM-Spare Copy Initiated", 183 [22] = "Redundant Fast Reset Timeout", 184 [21] = "Memory Write error on redundant retry", 185 [18] = "SPD protocol Error", 186 [17] = "FBD Northbound parity error on FBD Sync Status", 187 [16] = "Correctable Patrol Data ECC", 188 [15] = "Correctable Resilver- or Spare-Copy Data ECC", 189 [14] = "Correctable Mirrored Demand Data ECC", 190 [13] = "Correctable Non-Mirrored Demand Data ECC", 191 [11] = "Memory or FBD configuration CRC read error", 192 [10] = "FBD Configuration Write error on first attempt", 193 [9] = "Memory Write error on first attempt", 194 [8] = "Non-Aliased Uncorrectable Patrol Data ECC", 195 [7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", 196 [6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC", 197 [5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC", 198 [4] = "Aliased Uncorrectable Patrol Data ECC", 199 [3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", 200 [2] = "Aliased Uncorrectable Mirrored Demand Data ECC", 201 [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", 202 [0] = "Uncorrectable Data ECC on Replay", 203 }; 204 #define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28)) 205 #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ 206 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ 207 (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ 208 (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\ 209 (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\ 210 (1 << 1) | (1 << 0)) 211 212 #define EMASK_FBD 0xa8 213 #define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\ 214 (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\ 215 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\ 216 (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\ 217 (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\ 218 (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\ 219 (1 << 1) | (1 << 0)) 220 221 /* 222 * Device 16.2: Global Error Registers 223 */ 224 225 #define FERR_GLOBAL_HI 0x48 226 static const char *ferr_global_hi_name[] = { 227 [3] = "FSB 3 Fatal Error", 228 [2] = "FSB 2 Fatal Error", 229 [1] = "FSB 1 Fatal Error", 230 [0] = "FSB 0 Fatal Error", 231 }; 232 #define ferr_global_hi_is_fatal(errno) 1 233 234 #define FERR_GLOBAL_LO 0x40 235 static const char *ferr_global_lo_name[] = { 236 [31] = "Internal MCH Fatal Error", 237 [30] = "Intel QuickData Technology Device Fatal Error", 238 [29] = "FSB1 Fatal Error", 239 [28] = "FSB0 Fatal Error", 240 [27] = "FBD Channel 3 Fatal Error", 241 [26] = "FBD Channel 2 Fatal Error", 242 [25] = "FBD Channel 1 Fatal Error", 243 [24] = "FBD Channel 0 Fatal Error", 244 [23] = "PCI Express Device 7Fatal Error", 245 [22] = "PCI Express Device 6 Fatal Error", 246 [21] = "PCI Express Device 5 Fatal Error", 247 [20] = "PCI Express Device 4 Fatal Error", 248 [19] = "PCI Express Device 3 Fatal Error", 249 [18] = "PCI Express Device 2 Fatal Error", 250 [17] = "PCI Express Device 1 Fatal Error", 251 [16] = "ESI Fatal Error", 252 [15] = "Internal MCH Non-Fatal Error", 253 [14] = "Intel QuickData Technology Device Non Fatal Error", 254 [13] = "FSB1 Non-Fatal Error", 255 [12] = "FSB 0 Non-Fatal Error", 256 [11] = "FBD Channel 3 Non-Fatal Error", 257 [10] = "FBD Channel 2 Non-Fatal Error", 258 [9] = "FBD Channel 1 Non-Fatal Error", 259 [8] = "FBD Channel 0 Non-Fatal Error", 260 [7] = "PCI Express Device 7 Non-Fatal Error", 261 [6] = "PCI Express Device 6 Non-Fatal Error", 262 [5] = "PCI Express Device 5 Non-Fatal Error", 263 [4] = "PCI Express Device 4 Non-Fatal Error", 264 [3] = "PCI Express Device 3 Non-Fatal Error", 265 [2] = "PCI Express Device 2 Non-Fatal Error", 266 [1] = "PCI Express Device 1 Non-Fatal Error", 267 [0] = "ESI Non-Fatal Error", 268 }; 269 #define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1) 270 271 #define NRECMEMA 0xbe 272 #define NRECMEMA_BANK(v) (((v) >> 12) & 7) 273 #define NRECMEMA_RANK(v) (((v) >> 8) & 15) 274 275 #define NRECMEMB 0xc0 276 #define NRECMEMB_IS_WR(v) ((v) & (1 << 31)) 277 #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff) 278 #define NRECMEMB_RAS(v) ((v) & 0xffff) 279 280 281 /* Device name and register DID (Device ID) */ 282 struct i7300_dev_info { 283 const char *ctl_name; /* name for this device */ 284 u16 fsb_mapping_errors; /* DID for the branchmap,control */ 285 }; 286 287 /* Table of devices attributes supported by this driver */ 288 static const struct i7300_dev_info i7300_devs[] = { 289 { 290 .ctl_name = "I7300", 291 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, 292 }, 293 }; 294 295 struct i7300_dimm_info { 296 int megabytes; /* size, 0 means not present */ 297 }; 298 299 /* driver private data structure */ 300 struct i7300_pvt { 301 struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */ 302 struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */ 303 struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */ 304 struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */ 305 306 u16 tolm; /* top of low memory */ 307 u64 ambase; /* AMB BAR */ 308 309 u32 mc_settings; /* Report several settings */ 310 u32 mc_settings_a; 311 312 u16 mir[MAX_MIR]; /* Memory Interleave Reg*/ 313 314 u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technlogy Reg */ 315 u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */ 316 317 /* DIMM information matrix, allocating architecture maximums */ 318 struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS]; 319 320 /* Temporary buffer for use when preparing error messages */ 321 char *tmp_prt_buffer; 322 }; 323 324 /* FIXME: Why do we need to have this static? */ 325 static struct edac_pci_ctl_info *i7300_pci; 326 327 /******************************************** 328 * i7300 Functions related to error detection 329 ********************************************/ 330 331 const char *get_err_from_table(const char *table[], int size, int pos) 332 { 333 if (pos >= size) 334 return "Reserved"; 335 336 return table[pos]; 337 } 338 339 #define GET_ERR_FROM_TABLE(table, pos) \ 340 get_err_from_table(table, ARRAY_SIZE(table), pos) 341 342 /* 343 * i7300_process_error_global Retrieve the hardware error information from 344 * the hardware and cache it in the 'info' 345 * structure 346 */ 347 static void i7300_process_error_global(struct mem_ctl_info *mci) 348 { 349 struct i7300_pvt *pvt; 350 u32 errnum, value; 351 unsigned long errors; 352 const char *specific; 353 bool is_fatal; 354 355 pvt = mci->pvt_info; 356 357 /* read in the 1st FATAL error register */ 358 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 359 FERR_GLOBAL_HI, &value); 360 if (unlikely(value)) { 361 errors = value; 362 errnum = find_first_bit(&errors, 363 ARRAY_SIZE(ferr_global_hi_name)); 364 specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum); 365 is_fatal = ferr_global_hi_is_fatal(errnum); 366 367 /* Clear the error bit */ 368 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 369 FERR_GLOBAL_HI, value); 370 371 goto error_global; 372 } 373 374 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 375 FERR_GLOBAL_LO, &value); 376 if (unlikely(value)) { 377 errors = value; 378 errnum = find_first_bit(&errors, 379 ARRAY_SIZE(ferr_global_lo_name)); 380 specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum); 381 is_fatal = ferr_global_lo_is_fatal(errnum); 382 383 /* Clear the error bit */ 384 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 385 FERR_GLOBAL_LO, value); 386 387 goto error_global; 388 } 389 return; 390 391 error_global: 392 i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n", 393 is_fatal ? "Fatal" : "NOT fatal", specific); 394 } 395 396 /* 397 * i7300_process_fbd_error Retrieve the hardware error information from 398 * the hardware and cache it in the 'info' 399 * structure 400 */ 401 static void i7300_process_fbd_error(struct mem_ctl_info *mci) 402 { 403 struct i7300_pvt *pvt; 404 u32 errnum, value; 405 u16 val16; 406 int branch, bank, rank, cas, ras; 407 unsigned long errors; 408 const char *specific; 409 bool is_fatal, is_wr; 410 411 pvt = mci->pvt_info; 412 413 /* read in the 1st FATAL error register */ 414 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 415 FERR_FAT_FBD, &value); 416 if (unlikely(value & FERR_FAT_FBD_ERR_MASK)) { 417 errors = value & FERR_FAT_FBD_ERR_MASK ; 418 errnum = find_first_bit(&errors, 419 ARRAY_SIZE(ferr_fat_fbd_name)); 420 specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum); 421 is_fatal = 1; 422 423 branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0; 424 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, 425 NRECMEMA, &val16); 426 bank = NRECMEMA_BANK(val16); 427 rank = NRECMEMA_RANK(val16); 428 429 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 430 NRECMEMB, &value); 431 432 is_wr = NRECMEMB_IS_WR(value); 433 cas = NRECMEMB_CAS(value); 434 ras = NRECMEMB_RAS(value); 435 436 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, 437 "FATAL (Branch=%d DRAM-Bank=%d %s " 438 "RAS=%d CAS=%d Err=0x%lx (%s))", 439 branch >> 1, bank, 440 is_wr ? "RDWR" : "RD", 441 ras, cas, 442 errors, specific); 443 444 /* Call the helper to output message */ 445 edac_mc_handle_fbd_ue(mci, rank, branch << 1, 446 (branch << 1) + 1, 447 pvt->tmp_prt_buffer); 448 return; 449 } 450 451 /* read in the 1st NON-FATAL error register */ 452 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 453 FERR_NF_FBD, &value); 454 if (unlikely(value & FERR_NF_FBD_ERR_MASK)) { 455 errors = value & FERR_NF_FBD_ERR_MASK; 456 errnum = find_first_bit(&errors, 457 ARRAY_SIZE(ferr_nf_fbd_name)); 458 specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); 459 is_fatal = 0; 460 461 /* Clear the error bit */ 462 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 463 FERR_GLOBAL_LO, value); 464 465 goto error_fbd; 466 } 467 return; 468 469 error_fbd: 470 471 i7300_mc_printk(mci, KERN_EMERG, "%s FBD error on branch %d: %s\n", 472 is_fatal ? "Fatal" : "NOT fatal", branch, specific); 473 } 474 475 /* 476 * i7300_check_error Retrieve the hardware error information from 477 * the hardware and cache it in the 'info' 478 * structure 479 */ 480 static void i7300_check_error(struct mem_ctl_info *mci) 481 { 482 i7300_process_error_global(mci); 483 i7300_process_fbd_error(mci); 484 }; 485 486 /* 487 * i7300_clear_error Retrieve any error from the hardware 488 * but do NOT process that error. 489 * Used for 'clearing' out of previous errors 490 * Called by the Core module. 491 */ 492 static void i7300_clear_error(struct mem_ctl_info *mci) 493 { 494 struct i7300_pvt *pvt = mci->pvt_info; 495 u32 value; 496 /* 497 * All error values are RWC - we need to read and write 1 to the 498 * bit that we want to cleanup 499 */ 500 501 /* Clear global error registers */ 502 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 503 FERR_GLOBAL_HI, &value); 504 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 505 FERR_GLOBAL_HI, value); 506 507 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 508 FERR_GLOBAL_LO, &value); 509 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 510 FERR_GLOBAL_LO, value); 511 512 /* Clear FBD error registers */ 513 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 514 FERR_FAT_FBD, &value); 515 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 516 FERR_FAT_FBD, value); 517 518 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 519 FERR_NF_FBD, &value); 520 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 521 FERR_NF_FBD, value); 522 } 523 524 /* 525 * i7300_enable_error_reporting 526 * Turn on the memory reporting features of the hardware 527 */ 528 static void i7300_enable_error_reporting(struct mem_ctl_info *mci) 529 { 530 struct i7300_pvt *pvt = mci->pvt_info; 531 u32 fbd_error_mask; 532 533 /* Read the FBD Error Mask Register */ 534 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 535 EMASK_FBD, &fbd_error_mask); 536 537 /* Enable with a '0' */ 538 fbd_error_mask &= ~(EMASK_FBD_ERR_MASK); 539 540 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 541 EMASK_FBD, fbd_error_mask); 542 } 543 544 /************************************************ 545 * i7300 Functions related to memory enumberation 546 ************************************************/ 547 548 /* 549 * determine_mtr(pvt, csrow, channel) 550 * 551 * return the proper MTR register as determine by the csrow and desired channel 552 */ 553 static int decode_mtr(struct i7300_pvt *pvt, 554 int slot, int ch, int branch, 555 struct i7300_dimm_info *dinfo, 556 struct csrow_info *p_csrow) 557 { 558 int mtr, ans, addrBits, channel; 559 560 channel = to_channel(ch, branch); 561 562 mtr = pvt->mtr[slot][branch]; 563 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; 564 565 debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n", 566 slot, channel, 567 ans ? "Present" : "NOT Present"); 568 569 /* Determine if there is a DIMM present in this DIMM slot */ 570 571 #if 0 572 if (!amb_present || !ans) 573 return 0; 574 #else 575 if (!ans) 576 return 0; 577 #endif 578 579 /* Start with the number of bits for a Bank 580 * on the DRAM */ 581 addrBits = MTR_DRAM_BANKS_ADDR_BITS; 582 /* Add thenumber of ROW bits */ 583 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); 584 /* add the number of COLUMN bits */ 585 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 586 /* add the number of RANK bits */ 587 addrBits += MTR_DIMM_RANKS(mtr); 588 589 addrBits += 6; /* add 64 bits per DIMM */ 590 addrBits -= 20; /* divide by 2^^20 */ 591 addrBits -= 3; /* 8 bits per bytes */ 592 593 dinfo->megabytes = 1 << addrBits; 594 595 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 596 597 debugf2("\t\tELECTRICAL THROTTLING is %s\n", 598 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 599 600 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 601 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); 602 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 603 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 604 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); 605 606 p_csrow->grain = 8; 607 p_csrow->nr_pages = dinfo->megabytes << 8; 608 p_csrow->mtype = MEM_FB_DDR2; 609 610 /* 611 * The type of error detection actually depends of the 612 * mode of operation. When it is just one single memory chip, at 613 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code. 614 * In normal or mirrored mode, it uses Lockstep mode, 615 * with the possibility of using an extended algorithm for x8 memories 616 * See datasheet Sections 7.3.6 to 7.3.8 617 */ 618 619 if (IS_SINGLE_MODE(pvt->mc_settings_a)) { 620 p_csrow->edac_mode = EDAC_SECDED; 621 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); 622 } else { 623 debugf2("\t\tECC code is on Lockstep mode\n"); 624 if (MTR_DRAM_WIDTH(mtr) == 8) 625 p_csrow->edac_mode = EDAC_S8ECD8ED; 626 else 627 p_csrow->edac_mode = EDAC_S4ECD4ED; 628 } 629 630 /* ask what device type on this row */ 631 if (MTR_DRAM_WIDTH(mtr) == 8) { 632 debugf2("\t\tScrub algorithm for x8 is on %s mode\n", 633 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 634 "enhanced" : "normal"); 635 636 p_csrow->dtype = DEV_X8; 637 } else 638 p_csrow->dtype = DEV_X4; 639 640 return mtr; 641 } 642 643 /* 644 * print_dimm_size 645 * 646 * also will output a DIMM matrix map, if debug is enabled, for viewing 647 * how the DIMMs are populated 648 */ 649 static void print_dimm_size(struct i7300_pvt *pvt) 650 { 651 struct i7300_dimm_info *dinfo; 652 char *p; 653 int space, n; 654 int channel, slot; 655 656 space = PAGE_SIZE; 657 p = pvt->tmp_prt_buffer; 658 659 n = snprintf(p, space, " "); 660 p += n; 661 space -= n; 662 for (channel = 0; channel < MAX_CHANNELS; channel++) { 663 n = snprintf(p, space, "channel %d | ", channel); 664 p += n; 665 space -= n; 666 } 667 debugf2("%s\n", pvt->tmp_prt_buffer); 668 p = pvt->tmp_prt_buffer; 669 space = PAGE_SIZE; 670 n = snprintf(p, space, "-------------------------------" 671 "------------------------------"); 672 p += n; 673 space -= n; 674 debugf2("%s\n", pvt->tmp_prt_buffer); 675 p = pvt->tmp_prt_buffer; 676 space = PAGE_SIZE; 677 678 for (slot = 0; slot < MAX_SLOTS; slot++) { 679 n = snprintf(p, space, "csrow/SLOT %d ", slot); 680 p += n; 681 space -= n; 682 683 for (channel = 0; channel < MAX_CHANNELS; channel++) { 684 dinfo = &pvt->dimm_info[slot][channel]; 685 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); 686 p += n; 687 space -= n; 688 } 689 690 debugf2("%s\n", pvt->tmp_prt_buffer); 691 p = pvt->tmp_prt_buffer; 692 space = PAGE_SIZE; 693 } 694 695 n = snprintf(p, space, "-------------------------------" 696 "------------------------------"); 697 p += n; 698 space -= n; 699 debugf2("%s\n", pvt->tmp_prt_buffer); 700 p = pvt->tmp_prt_buffer; 701 space = PAGE_SIZE; 702 } 703 704 /* 705 * i7300_init_csrows Initialize the 'csrows' table within 706 * the mci control structure with the 707 * addressing of memory. 708 * 709 * return: 710 * 0 success 711 * 1 no actual memory found on this MC 712 */ 713 static int i7300_init_csrows(struct mem_ctl_info *mci) 714 { 715 struct i7300_pvt *pvt; 716 struct i7300_dimm_info *dinfo; 717 struct csrow_info *p_csrow; 718 int empty; 719 int mtr; 720 int ch, branch, slot, channel; 721 722 pvt = mci->pvt_info; 723 724 empty = 1; /* Assume NO memory */ 725 726 debugf2("Memory Technology Registers:\n"); 727 728 /* Get the AMB present registers for the four channels */ 729 for (branch = 0; branch < MAX_BRANCHES; branch++) { 730 /* Read and dump branch 0's MTRs */ 731 channel = to_channel(0, branch); 732 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], AMBPRESENT_0, 733 &pvt->ambpresent[channel]); 734 debugf2("\t\tAMB-present CH%d = 0x%x:\n", 735 channel, pvt->ambpresent[channel]); 736 737 channel = to_channel(1, branch); 738 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], AMBPRESENT_1, 739 &pvt->ambpresent[channel]); 740 debugf2("\t\tAMB-present CH%d = 0x%x:\n", 741 channel, pvt->ambpresent[channel]); 742 } 743 744 /* Get the set of MTR[0-7] regs by each branch */ 745 for (slot = 0; slot < MAX_SLOTS; slot++) { 746 int where = mtr_regs[slot]; 747 for (branch = 0; branch < MAX_BRANCHES; branch++) { 748 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 749 where, 750 &pvt->mtr[slot][branch]); 751 for (ch = 0; ch < MAX_BRANCHES; ch++) { 752 int channel = to_channel(ch, branch); 753 754 dinfo = &pvt->dimm_info[slot][channel]; 755 p_csrow = &mci->csrows[slot]; 756 757 mtr = decode_mtr(pvt, slot, ch, branch, 758 dinfo, p_csrow); 759 /* if no DIMMS on this row, continue */ 760 if (!MTR_DIMMS_PRESENT(mtr)) 761 continue; 762 763 p_csrow->csrow_idx = slot; 764 765 /* FAKE OUT VALUES, FIXME */ 766 p_csrow->first_page = 0 + slot * 20; 767 p_csrow->last_page = 9 + slot * 20; 768 p_csrow->page_mask = 0xfff; 769 770 empty = 0; 771 } 772 } 773 } 774 775 return empty; 776 } 777 778 static void decode_mir(int mir_no, u16 mir[MAX_MIR]) 779 { 780 if (mir[mir_no] & 3) 781 debugf2("MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n", 782 mir_no, 783 (mir[mir_no] >> 4) & 0xfff, 784 (mir[mir_no] & 1) ? "B0" : "", 785 (mir[mir_no] & 2) ? "B1": ""); 786 } 787 788 /* 789 * i7300_get_mc_regs read in the necessary registers and 790 * cache locally 791 * 792 * Fills in the private data members 793 */ 794 static int i7300_get_mc_regs(struct mem_ctl_info *mci) 795 { 796 struct i7300_pvt *pvt; 797 u32 actual_tolm; 798 int i, rc; 799 800 pvt = mci->pvt_info; 801 802 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, 803 (u32 *) &pvt->ambase); 804 805 debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); 806 807 /* Get the Branch Map regs */ 808 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); 809 pvt->tolm >>= 12; 810 debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 811 pvt->tolm); 812 813 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 814 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 815 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 816 817 /* Get memory controller settings */ 818 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, 819 &pvt->mc_settings); 820 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A, 821 &pvt->mc_settings_a); 822 823 if (IS_SINGLE_MODE(pvt->mc_settings_a)) 824 debugf0("Memory controller operating on single mode\n"); 825 else 826 debugf0("Memory controller operating on %s mode\n", 827 IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored"); 828 829 debugf0("Error detection is %s\n", 830 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 831 debugf0("Retry is %s\n", 832 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 833 834 /* Get Memory Interleave Range registers */ 835 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, &pvt->mir[0]); 836 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1, &pvt->mir[1]); 837 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2, &pvt->mir[2]); 838 839 /* Decode the MIR regs */ 840 for (i = 0; i < MAX_MIR; i++) 841 decode_mir(i, pvt->mir); 842 843 rc = i7300_init_csrows(mci); 844 if (rc < 0) 845 return rc; 846 847 /* Go and determine the size of each DIMM and place in an 848 * orderly matrix */ 849 print_dimm_size(pvt); 850 851 return 0; 852 } 853 854 /************************************************* 855 * i7300 Functions related to device probe/release 856 *************************************************/ 857 858 /* 859 * i7300_put_devices 'put' all the devices that we have 860 * reserved via 'get' 861 */ 862 static void i7300_put_devices(struct mem_ctl_info *mci) 863 { 864 struct i7300_pvt *pvt; 865 int branch; 866 867 pvt = mci->pvt_info; 868 869 /* Decrement usage count for devices */ 870 for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++) 871 pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]); 872 pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs); 873 pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map); 874 } 875 876 /* 877 * i7300_get_devices Find and perform 'get' operation on the MCH's 878 * device/functions we want to reference for this driver 879 * 880 * Need to 'get' device 16 func 1 and func 2 881 */ 882 static int i7300_get_devices(struct mem_ctl_info *mci, int dev_idx) 883 { 884 struct i7300_pvt *pvt; 885 struct pci_dev *pdev; 886 887 pvt = mci->pvt_info; 888 889 /* Attempt to 'get' the MCH register we want */ 890 pdev = NULL; 891 while (!pvt->pci_dev_16_1_fsb_addr_map || !pvt->pci_dev_16_2_fsb_err_regs) { 892 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 893 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev); 894 if (!pdev) { 895 /* End of list, leave */ 896 i7300_printk(KERN_ERR, 897 "'system address,Process Bus' " 898 "device not found:" 899 "vendor 0x%x device 0x%x ERR funcs " 900 "(broken BIOS?)\n", 901 PCI_VENDOR_ID_INTEL, 902 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); 903 goto error; 904 } 905 906 /* Store device 16 funcs 1 and 2 */ 907 switch (PCI_FUNC(pdev->devfn)) { 908 case 1: 909 pvt->pci_dev_16_1_fsb_addr_map = pdev; 910 break; 911 case 2: 912 pvt->pci_dev_16_2_fsb_err_regs = pdev; 913 break; 914 } 915 } 916 917 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 918 pci_name(pvt->pci_dev_16_0_fsb_ctlr), 919 pvt->pci_dev_16_0_fsb_ctlr->vendor, pvt->pci_dev_16_0_fsb_ctlr->device); 920 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 921 pci_name(pvt->pci_dev_16_1_fsb_addr_map), 922 pvt->pci_dev_16_1_fsb_addr_map->vendor, pvt->pci_dev_16_1_fsb_addr_map->device); 923 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 924 pci_name(pvt->pci_dev_16_2_fsb_err_regs), 925 pvt->pci_dev_16_2_fsb_err_regs->vendor, pvt->pci_dev_16_2_fsb_err_regs->device); 926 927 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, 928 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, 929 NULL); 930 if (!pvt->pci_dev_2x_0_fbd_branch[0]) { 931 i7300_printk(KERN_ERR, 932 "MC: 'BRANCH 0' device not found:" 933 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", 934 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0); 935 goto error; 936 } 937 938 pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL, 939 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1, 940 NULL); 941 if (!pvt->pci_dev_2x_0_fbd_branch[1]) { 942 i7300_printk(KERN_ERR, 943 "MC: 'BRANCH 1' device not found:" 944 "vendor 0x%x device 0x%x Func 0 " 945 "(broken BIOS?)\n", 946 PCI_VENDOR_ID_INTEL, 947 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1); 948 goto error; 949 } 950 951 return 0; 952 953 error: 954 i7300_put_devices(mci); 955 return -ENODEV; 956 } 957 958 /* 959 * i7300_probe1 Probe for ONE instance of device to see if it is 960 * present. 961 * return: 962 * 0 for FOUND a device 963 * < 0 for error code 964 */ 965 static int i7300_probe1(struct pci_dev *pdev, int dev_idx) 966 { 967 struct mem_ctl_info *mci; 968 struct i7300_pvt *pvt; 969 int num_channels; 970 int num_dimms_per_channel; 971 int num_csrows; 972 973 if (dev_idx >= ARRAY_SIZE(i7300_devs)) 974 return -EINVAL; 975 976 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 977 __func__, 978 pdev->bus->number, 979 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 980 981 /* We only are looking for func 0 of the set */ 982 if (PCI_FUNC(pdev->devfn) != 0) 983 return -ENODEV; 984 985 /* As we don't have a motherboard identification routine to determine 986 * actual number of slots/dimms per channel, we thus utilize the 987 * resource as specified by the chipset. Thus, we might have 988 * have more DIMMs per channel than actually on the mobo, but this 989 * allows the driver to support upto the chipset max, without 990 * some fancy mobo determination. 991 */ 992 num_dimms_per_channel = MAX_SLOTS; 993 num_channels = MAX_CHANNELS; 994 num_csrows = MAX_SLOTS * MAX_CHANNELS; 995 996 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", 997 __func__, num_channels, num_dimms_per_channel, num_csrows); 998 999 /* allocate a new MC control structure */ 1000 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); 1001 1002 if (mci == NULL) 1003 return -ENOMEM; 1004 1005 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1006 1007 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1008 1009 pvt = mci->pvt_info; 1010 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ 1011 1012 pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); 1013 if (!pvt->tmp_prt_buffer) { 1014 edac_mc_free(mci); 1015 return -ENOMEM; 1016 } 1017 1018 /* 'get' the pci devices we want to reserve for our use */ 1019 if (i7300_get_devices(mci, dev_idx)) 1020 goto fail0; 1021 1022 mci->mc_idx = 0; 1023 mci->mtype_cap = MEM_FLAG_FB_DDR2; 1024 mci->edac_ctl_cap = EDAC_FLAG_NONE; 1025 mci->edac_cap = EDAC_FLAG_NONE; 1026 mci->mod_name = "i7300_edac.c"; 1027 mci->mod_ver = I7300_REVISION; 1028 mci->ctl_name = i7300_devs[dev_idx].ctl_name; 1029 mci->dev_name = pci_name(pdev); 1030 mci->ctl_page_to_phys = NULL; 1031 1032 /* Set the function pointer to an actual operation function */ 1033 mci->edac_check = i7300_check_error; 1034 1035 /* initialize the MC control structure 'csrows' table 1036 * with the mapping and control information */ 1037 if (i7300_get_mc_regs(mci)) { 1038 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1039 " because i7300_init_csrows() returned nonzero " 1040 "value\n"); 1041 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1042 } else { 1043 debugf1("MC: Enable error reporting now\n"); 1044 i7300_enable_error_reporting(mci); 1045 } 1046 1047 /* add this new MC control structure to EDAC's list of MCs */ 1048 if (edac_mc_add_mc(mci)) { 1049 debugf0("MC: " __FILE__ 1050 ": %s(): failed edac_mc_add_mc()\n", __func__); 1051 /* FIXME: perhaps some code should go here that disables error 1052 * reporting if we just enabled it 1053 */ 1054 goto fail1; 1055 } 1056 1057 i7300_clear_error(mci); 1058 1059 /* allocating generic PCI control info */ 1060 i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 1061 if (!i7300_pci) { 1062 printk(KERN_WARNING 1063 "%s(): Unable to create PCI control\n", 1064 __func__); 1065 printk(KERN_WARNING 1066 "%s(): PCI error report via EDAC not setup\n", 1067 __func__); 1068 } 1069 1070 return 0; 1071 1072 /* Error exit unwinding stack */ 1073 fail1: 1074 1075 i7300_put_devices(mci); 1076 1077 fail0: 1078 kfree(pvt->tmp_prt_buffer); 1079 edac_mc_free(mci); 1080 return -ENODEV; 1081 } 1082 1083 /* 1084 * i7300_init_one constructor for one instance of device 1085 * 1086 * returns: 1087 * negative on error 1088 * count (>= 0) 1089 */ 1090 static int __devinit i7300_init_one(struct pci_dev *pdev, 1091 const struct pci_device_id *id) 1092 { 1093 int rc; 1094 1095 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1096 1097 /* wake up device */ 1098 rc = pci_enable_device(pdev); 1099 if (rc == -EIO) 1100 return rc; 1101 1102 /* now probe and enable the device */ 1103 return i7300_probe1(pdev, id->driver_data); 1104 } 1105 1106 /* 1107 * i7300_remove_one destructor for one instance of device 1108 * 1109 */ 1110 static void __devexit i7300_remove_one(struct pci_dev *pdev) 1111 { 1112 struct mem_ctl_info *mci; 1113 char *tmp; 1114 1115 debugf0(__FILE__ ": %s()\n", __func__); 1116 1117 if (i7300_pci) 1118 edac_pci_release_generic_ctl(i7300_pci); 1119 1120 mci = edac_mc_del_mc(&pdev->dev); 1121 if (!mci) 1122 return; 1123 1124 tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer; 1125 1126 /* retrieve references to resources, and free those resources */ 1127 i7300_put_devices(mci); 1128 1129 kfree(tmp); 1130 edac_mc_free(mci); 1131 } 1132 1133 /* 1134 * pci_device_id table for which devices we are looking for 1135 * 1136 * The "E500P" device is the first device supported. 1137 */ 1138 static const struct pci_device_id i7300_pci_tbl[] __devinitdata = { 1139 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, 1140 {0,} /* 0 terminated list. */ 1141 }; 1142 1143 MODULE_DEVICE_TABLE(pci, i7300_pci_tbl); 1144 1145 /* 1146 * i7300_driver pci_driver structure for this module 1147 * 1148 */ 1149 static struct pci_driver i7300_driver = { 1150 .name = "i7300_edac", 1151 .probe = i7300_init_one, 1152 .remove = __devexit_p(i7300_remove_one), 1153 .id_table = i7300_pci_tbl, 1154 }; 1155 1156 /* 1157 * i7300_init Module entry function 1158 * Try to initialize this module for its devices 1159 */ 1160 static int __init i7300_init(void) 1161 { 1162 int pci_rc; 1163 1164 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1165 1166 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1167 opstate_init(); 1168 1169 pci_rc = pci_register_driver(&i7300_driver); 1170 1171 return (pci_rc < 0) ? pci_rc : 0; 1172 } 1173 1174 /* 1175 * i7300_exit() Module exit function 1176 * Unregister the driver 1177 */ 1178 static void __exit i7300_exit(void) 1179 { 1180 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1181 pci_unregister_driver(&i7300_driver); 1182 } 1183 1184 module_init(i7300_init); 1185 module_exit(i7300_exit); 1186 1187 MODULE_LICENSE("GPL"); 1188 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); 1189 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); 1190 MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - " 1191 I7300_REVISION); 1192 1193 module_param(edac_op_state, int, 0444); 1194 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1195