// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
 *
 * Copyright (c) 2010 by:
 *	 Mauro Carvalho Chehab
 *
 * Red Hat Inc. https://www.redhat.com
 *
 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
 *
 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 *	 the driver covers only memory errors.
 *
 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>

#include "edac_module.h"

/*
 * Alter this version for the I7300 module when modifications are made
 */
#define I7300_REVISION	" Ver: 1.0.0"

#define EDAC_MOD_STR	"i7300_edac"

#define i7300_printk(level, fmt, arg...) \
	edac_printk(level, "i7300", fmt, ##arg)

#define i7300_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)

/***********************************************
 * i7300 Limit constants Structs and static vars
 ***********************************************/

/*
 * Memory topology is organized as:
 *	Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
 *	Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTS)
 * Slots should generally be filled in pairs
 *	Except in Single Channel mode of operation,
 *	where only slot 0/channel 0 is filled
 * In normal operation mode, the two channels on a branch should be
 * filled together for the same SLOT#
 * When in mirrored mode, Branch 1 replicates the memory of Branch 0, so
 * the four channels on both branches should be filled
 */

/* Limits for i7300 */
#define MAX_SLOTS		8
#define MAX_BRANCHES		2
#define MAX_CH_PER_BRANCH	2
#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR			3

#define to_channel(ch, branch)	((((branch)) << 1) | (ch))

#define to_csrow(slot, ch, branch) \
		(to_channel(ch, branch) | ((slot) << 2))

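/*
 * Worked example (illustrative values): with slot = 2, ch = 1, branch = 0:
 *	to_channel(1, 0)  = (0 << 1) | 1 = 1
 *	to_csrow(2, 1, 0) = 1 | (2 << 2) = 9
 * i.e. the csrow number packs the channel into bits [1:0] and the slot
 * into bits [4:2].
 */
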
/* Device name and register DID (Device ID) */
struct i7300_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of devices attributes supported by this driver */
static const struct i7300_dev_info i7300_devs[] = {
	{
		.ctl_name = "I7300",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
	},
};

struct i7300_dimm_info {
	int megabytes;		/* size, 0 means not present */
};

/* driver private data structure */
struct i7300_pvt {
	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0 and 22.0 */

	u16 tolm;				/* top of low memory */
	u64 ambase;				/* AMB BAR */

	u32 mc_settings;			/* Report several settings */
	u32 mc_settings_a;

	u16 mir[MAX_MIR];			/* Memory Interleave Reg */

	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */

	/* DIMM information matrix, allocating architecture maximums */
	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];

	/* Temporary buffer for use when preparing error messages */
	char *tmp_prt_buffer;
};

/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;

/***************************************************
 * i7300 Register definitions for memory enumeration
 ***************************************************/

/*
 * Device 16,
 * Function 0: System Address (not documented)
 * Function 1: Memory Branch Map, Control, Errors Register
 */

/* OFFSETS for Function 0 */
#define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH			0x56 /* Max Channel Number */
#define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */

/* OFFSETS for Function 1 */
#define MC_SETTINGS		0x40
#define IS_MIRRORED(mc)			((mc) & (1 << 16))
#define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
#define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
#define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))

#define MC_SETTINGS_A		0x58
#define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))

#define TOLM			0x6C

#define MIR0			0x80
#define MIR1			0x84
#define MIR2			0x88

/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same usage.
 * Each memory slot may have up to 2 AMB interfaces, one for the incoming
 * and another for the outgoing interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies only
 * on the MTR info to detect memory.
 * The datasheet is also not clear about how to map each AMBPRESENT register
 * to one of the 4 available channels.
 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66

static const u16 mtr_regs[MAX_SLOTS] = {
	0x80, 0x84, 0x88, 0x8c,
	0x82, 0x86, 0x8a, 0x8e
};

/*
 * Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS	2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
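
/*
 * Worked example (illustrative value, not taken from the datasheet): an MTR
 * reading of 0x014a would decode as:
 *	MTR_DIMMS_PRESENT	- bit 8 set: DIMM present
 *	MTR_DRAM_WIDTH		- bit 6 set: x8
 *	MTR_DRAM_BANKS		- bit 5 clear: 4 banks
 *	MTR_DIMM_RANKS		- bit 4 clear: single rank
 *	MTR_DIMM_ROWS		- 2: 15 row address bits
 *	MTR_DIMM_COLS		- 2: 12 column address bits
 * decode_mtr() below turns these fields into the DIMM size; see the worked
 * size example after that function.
 */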

/************************************************
 * i7300 Register definitions for error detection
 ************************************************/

/*
 * Device 16.1: FBD Error Registers
 */
#define FERR_FAT_FBD	0x98
static const char *ferr_fat_fbd_name[] = {
	[22] = "Non-Redundant Fast Reset Timeout",
	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
	[1]  = "Memory or FBD configuration CRC read error",
	[0]  = "Memory Write error on non-redundant retry or "
	       "FBD configuration Write error on retry",
};
#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

#define FERR_NF_FBD	0xa0
static const char *ferr_nf_fbd_name[] = {
	[24] = "DIMM-Spare Copy Completed",
	[23] = "DIMM-Spare Copy Initiated",
	[22] = "Redundant Fast Reset Timeout",
	[21] = "Memory Write error on redundant retry",
	[18] = "SPD protocol Error",
	[17] = "FBD Northbound parity error on FBD Sync Status",
	[16] = "Correctable Patrol Data ECC",
	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
	[14] = "Correctable Mirrored Demand Data ECC",
	[13] = "Correctable Non-Mirrored Demand Data ECC",
	[11] = "Memory or FBD configuration CRC read error",
	[10] = "FBD Configuration Write error on first attempt",
	[9]  = "Memory Write error on first attempt",
	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[4]  = "Aliased Uncorrectable Patrol Data ECC",
	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[0]  = "Uncorrectable Data ECC on Replay",
};
#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			      (1 << 1)  | (1 << 0))

#define EMASK_FBD	0xa8
#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			    (1 << 1)  | (1 << 0))

/*
 * Device 16.2: Global Error Registers
 */

#define FERR_GLOBAL_HI	0x48
static const char *ferr_global_hi_name[] = {
	[3] = "FSB 3 Fatal Error",
	[2] = "FSB 2 Fatal Error",
	[1] = "FSB 1 Fatal Error",
	[0] = "FSB 0 Fatal Error",
};
#define ferr_global_hi_is_fatal(errno)	1

#define FERR_GLOBAL_LO	0x40
static const char *ferr_global_lo_name[] = {
	[31] = "Internal MCH Fatal Error",
	[30] = "Intel QuickData Technology Device Fatal Error",
	[29] = "FSB1 Fatal Error",
	[28] = "FSB0 Fatal Error",
	[27] = "FBD Channel 3 Fatal Error",
	[26] = "FBD Channel 2 Fatal Error",
	[25] = "FBD Channel 1 Fatal Error",
	[24] = "FBD Channel 0 Fatal Error",
	[23] = "PCI Express Device 7 Fatal Error",
	[22] = "PCI Express Device 6 Fatal Error",
	[21] = "PCI Express Device 5 Fatal Error",
	[20] = "PCI Express Device 4 Fatal Error",
	[19] = "PCI Express Device 3 Fatal Error",
	[18] = "PCI Express Device 2 Fatal Error",
	[17] = "PCI Express Device 1 Fatal Error",
	[16] = "ESI Fatal Error",
	[15] = "Internal MCH Non-Fatal Error",
	[14] = "Intel QuickData Technology Device Non Fatal Error",
	[13] = "FSB1 Non-Fatal Error",
	[12] = "FSB 0 Non-Fatal Error",
	[11] = "FBD Channel 3 Non-Fatal Error",
	[10] = "FBD Channel 2 Non-Fatal Error",
	[9]  = "FBD Channel 1 Non-Fatal Error",
	[8]  = "FBD Channel 0 Non-Fatal Error",
	[7]  = "PCI Express Device 7 Non-Fatal Error",
	[6]  = "PCI Express Device 6 Non-Fatal Error",
	[5]  = "PCI Express Device 5 Non-Fatal Error",
	[4]  = "PCI Express Device 4 Non-Fatal Error",
	[3]  = "PCI Express Device 3 Non-Fatal Error",
	[2]  = "PCI Express Device 2 Non-Fatal Error",
	[1]  = "PCI Express Device 1 Non-Fatal Error",
	[0]  = "ESI Non-Fatal Error",
};
#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)
"FSB1 Fatal Error", 261 [28] = "FSB0 Fatal Error", 262 [27] = "FBD Channel 3 Fatal Error", 263 [26] = "FBD Channel 2 Fatal Error", 264 [25] = "FBD Channel 1 Fatal Error", 265 [24] = "FBD Channel 0 Fatal Error", 266 [23] = "PCI Express Device 7Fatal Error", 267 [22] = "PCI Express Device 6 Fatal Error", 268 [21] = "PCI Express Device 5 Fatal Error", 269 [20] = "PCI Express Device 4 Fatal Error", 270 [19] = "PCI Express Device 3 Fatal Error", 271 [18] = "PCI Express Device 2 Fatal Error", 272 [17] = "PCI Express Device 1 Fatal Error", 273 [16] = "ESI Fatal Error", 274 [15] = "Internal MCH Non-Fatal Error", 275 [14] = "Intel QuickData Technology Device Non Fatal Error", 276 [13] = "FSB1 Non-Fatal Error", 277 [12] = "FSB 0 Non-Fatal Error", 278 [11] = "FBD Channel 3 Non-Fatal Error", 279 [10] = "FBD Channel 2 Non-Fatal Error", 280 [9] = "FBD Channel 1 Non-Fatal Error", 281 [8] = "FBD Channel 0 Non-Fatal Error", 282 [7] = "PCI Express Device 7 Non-Fatal Error", 283 [6] = "PCI Express Device 6 Non-Fatal Error", 284 [5] = "PCI Express Device 5 Non-Fatal Error", 285 [4] = "PCI Express Device 4 Non-Fatal Error", 286 [3] = "PCI Express Device 3 Non-Fatal Error", 287 [2] = "PCI Express Device 2 Non-Fatal Error", 288 [1] = "PCI Express Device 1 Non-Fatal Error", 289 [0] = "ESI Non-Fatal Error", 290 }; 291 #define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1) 292 293 #define NRECMEMA 0xbe 294 #define NRECMEMA_BANK(v) (((v) >> 12) & 7) 295 #define NRECMEMA_RANK(v) (((v) >> 8) & 15) 296 297 #define NRECMEMB 0xc0 298 #define NRECMEMB_IS_WR(v) ((v) & (1 << 31)) 299 #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff) 300 #define NRECMEMB_RAS(v) ((v) & 0xffff) 301 302 #define REDMEMA 0xdc 303 304 #define REDMEMB 0x7c 305 306 #define RECMEMA 0xe0 307 #define RECMEMA_BANK(v) (((v) >> 12) & 7) 308 #define RECMEMA_RANK(v) (((v) >> 8) & 15) 309 310 #define RECMEMB 0xe4 311 #define RECMEMB_IS_WR(v) ((v) & (1 << 31)) 312 #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff) 313 #define RECMEMB_RAS(v) ((v) & 0xffff) 314 315 /******************************************** 316 * i7300 Functions related to error detection 317 ********************************************/ 318 319 /** 320 * get_err_from_table() - Gets the error message from a table 321 * @table: table name (array of char *) 322 * @size: number of elements at the table 323 * @pos: position of the element to be returned 324 * 325 * This is a small routine that gets the pos-th element of a table. If the 326 * element doesn't exist (or it is empty), it returns "reserved". 

/********************************************
 * i7300 Functions related to error detection
 ********************************************/

/**
 * get_err_from_table() - Gets the error message from a table
 * @table:	table name (array of char *)
 * @size:	number of elements in the table
 * @pos:	position of the element to be returned
 *
 * This is a small routine that gets the pos-th element of a table. If the
 * element doesn't exist (or it is empty), it returns "Reserved".
 * Instead of calling it directly, it is better to use the
 * GET_ERR_FROM_TABLE() macro, which automatically checks the table size via
 * the ARRAY_SIZE() macro.
 */
static const char *get_err_from_table(const char *table[], int size, int pos)
{
	if (unlikely(pos >= size))
		return "Reserved";

	if (unlikely(!table[pos]))
		return "Reserved";

	return table[pos];
}

#define GET_ERR_FROM_TABLE(table, pos) \
	get_err_from_table(table, ARRAY_SIZE(table), pos)

/**
 * i7300_process_error_global() - Retrieves the hardware error information
 *				  from the hardware global error registers and
 *				  sends it to dmesg
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, error_reg;
	unsigned long errors;
	const char *specific;
	bool is_fatal;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_hi_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
		is_fatal = ferr_global_hi_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_HI, error_reg);

		goto error_global;
	}

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_lo_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
		is_fatal = ferr_global_lo_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_LO, error_reg);

		goto error_global;
	}
	return;

error_global:
	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
			is_fatal ? "Fatal" : "NOT fatal", specific);
}

/**
 * i7300_process_fbd_error() - Retrieves the hardware error information from
 *			       the FBD error registers and sends it via
 *			       EDAC error API calls
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, value, error_reg;
	u16 val16;
	unsigned branch, channel, bank, rank, cas, ras;
	u32 syndrome;

	unsigned long errors;
	const char *specific;
	bool is_wr;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &error_reg);
	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_fat_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     NRECMEMA, &val16);
		bank = NRECMEMA_BANK(val16);
		rank = NRECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      NRECMEMB, &value);
		is_wr = NRECMEMB_IS_WR(value);
		cas = NRECMEMB_CAS(value);
		ras = NRECMEMB_RAS(value);

		/* Clear the error register */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_FAT_FBD, error_reg);

		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s)",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
				     branch, -1, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);

	}
"Write error" : "Read error", 500 pvt->tmp_prt_buffer); 501 } 502 return; 503 } 504 505 /** 506 * i7300_check_error() - Calls the error checking subroutines 507 * @mci: struct mem_ctl_info pointer 508 */ 509 static void i7300_check_error(struct mem_ctl_info *mci) 510 { 511 i7300_process_error_global(mci); 512 i7300_process_fbd_error(mci); 513 }; 514 515 /** 516 * i7300_clear_error() - Clears the error registers 517 * @mci: struct mem_ctl_info pointer 518 */ 519 static void i7300_clear_error(struct mem_ctl_info *mci) 520 { 521 struct i7300_pvt *pvt = mci->pvt_info; 522 u32 value; 523 /* 524 * All error values are RWC - we need to read and write 1 to the 525 * bit that we want to cleanup 526 */ 527 528 /* Clear global error registers */ 529 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 530 FERR_GLOBAL_HI, &value); 531 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 532 FERR_GLOBAL_HI, value); 533 534 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 535 FERR_GLOBAL_LO, &value); 536 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, 537 FERR_GLOBAL_LO, value); 538 539 /* Clear FBD error registers */ 540 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 541 FERR_FAT_FBD, &value); 542 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 543 FERR_FAT_FBD, value); 544 545 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 546 FERR_NF_FBD, &value); 547 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 548 FERR_NF_FBD, value); 549 } 550 551 /** 552 * i7300_enable_error_reporting() - Enable the memory reporting logic at the 553 * hardware 554 * @mci: struct mem_ctl_info pointer 555 */ 556 static void i7300_enable_error_reporting(struct mem_ctl_info *mci) 557 { 558 struct i7300_pvt *pvt = mci->pvt_info; 559 u32 fbd_error_mask; 560 561 /* Read the FBD Error Mask Register */ 562 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 563 EMASK_FBD, &fbd_error_mask); 564 565 /* Enable with a '0' */ 566 fbd_error_mask &= ~(EMASK_FBD_ERR_MASK); 567 568 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, 569 EMASK_FBD, fbd_error_mask); 570 } 571 572 /************************************************ 573 * i7300 Functions related to memory enumberation 574 ************************************************/ 575 576 /** 577 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs 578 * @pvt: pointer to the private data struct used by i7300 driver 579 * @slot: DIMM slot (0 to 7) 580 * @ch: Channel number within the branch (0 or 1) 581 * @branch: Branch number (0 or 1) 582 * @dinfo: Pointer to DIMM info where dimm size is stored 583 * @dimm: Pointer to the struct dimm_info that corresponds to that element 584 */ 585 static int decode_mtr(struct i7300_pvt *pvt, 586 int slot, int ch, int branch, 587 struct i7300_dimm_info *dinfo, 588 struct dimm_info *dimm) 589 { 590 int mtr, ans, addrBits, channel; 591 592 channel = to_channel(ch, branch); 593 594 mtr = pvt->mtr[slot][branch]; 595 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; 596 597 edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n", 598 slot, channel, ans ? 
"" : "NOT "); 599 600 /* Determine if there is a DIMM present in this DIMM slot */ 601 if (!ans) 602 return 0; 603 604 /* Start with the number of bits for a Bank 605 * on the DRAM */ 606 addrBits = MTR_DRAM_BANKS_ADDR_BITS; 607 /* Add thenumber of ROW bits */ 608 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); 609 /* add the number of COLUMN bits */ 610 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 611 /* add the number of RANK bits */ 612 addrBits += MTR_DIMM_RANKS(mtr); 613 614 addrBits += 6; /* add 64 bits per DIMM */ 615 addrBits -= 20; /* divide by 2^^20 */ 616 addrBits -= 3; /* 8 bits per bytes */ 617 618 dinfo->megabytes = 1 << addrBits; 619 620 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 621 622 edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n", 623 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 624 625 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 626 edac_dbg(2, "\t\tNUMRANK: %s\n", 627 MTR_DIMM_RANKS(mtr) ? "double" : "single"); 628 edac_dbg(2, "\t\tNUMROW: %s\n", 629 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : 630 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : 631 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : 632 "65,536 - 16 rows"); 633 edac_dbg(2, "\t\tNUMCOL: %s\n", 634 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : 635 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : 636 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" : 637 "reserved"); 638 edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes); 639 640 /* 641 * The type of error detection actually depends of the 642 * mode of operation. When it is just one single memory chip, at 643 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code. 644 * In normal or mirrored mode, it uses Lockstep mode, 645 * with the possibility of using an extended algorithm for x8 memories 646 * See datasheet Sections 7.3.6 to 7.3.8 647 */ 648 649 dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes); 650 dimm->grain = 8; 651 dimm->mtype = MEM_FB_DDR2; 652 if (IS_SINGLE_MODE(pvt->mc_settings_a)) { 653 dimm->edac_mode = EDAC_SECDED; 654 edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); 655 } else { 656 edac_dbg(2, "\t\tECC code is on Lockstep mode\n"); 657 if (MTR_DRAM_WIDTH(mtr) == 8) 658 dimm->edac_mode = EDAC_S8ECD8ED; 659 else 660 dimm->edac_mode = EDAC_S4ECD4ED; 661 } 662 663 /* ask what device type on this row */ 664 if (MTR_DRAM_WIDTH(mtr) == 8) { 665 edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n", 666 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 667 "enhanced" : "normal"); 668 669 dimm->dtype = DEV_X8; 670 } else 671 dimm->dtype = DEV_X4; 672 673 return mtr; 674 } 675 676 /** 677 * print_dimm_size() - Prints dump of the memory organization 678 * @pvt: pointer to the private data struct used by i7300 driver 679 * 680 * Useful for debug. 

/**
 * print_dimm_size() - Prints a dump of the memory organization
 * @pvt: pointer to the private data struct used by i7300 driver
 *
 * Useful for debug. If debug is disabled, this routine does nothing.
 */
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
	struct i7300_dimm_info *dinfo;
	char *p;
	int space, n;
	int channel, slot;

	space = PAGE_SIZE;
	p = pvt->tmp_prt_buffer;

	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < MAX_CHANNELS; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	for (slot = 0; slot < MAX_SLOTS; slot++) {
		n = snprintf(p, space, "csrow/SLOT %d ", slot);
		p += n;
		space -= n;

		for (channel = 0; channel < MAX_CHANNELS; channel++) {
			dinfo = &pvt->dimm_info[slot][channel];
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}

		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
		p = pvt->tmp_prt_buffer;
		space = PAGE_SIZE;
	}

	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
#endif
}

/**
 * i7300_init_csrows() - Initialize the 'csrows' table within
 *			 the mci control structure with the
 *			 addressing of memory.
 * @mci: struct mem_ctl_info pointer
 */
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct i7300_dimm_info *dinfo;
	int rc = -ENODEV;
	int mtr;
	int ch, branch, slot, channel, max_channel, max_branch;
	struct dimm_info *dimm;

	pvt = mci->pvt_info;

	edac_dbg(2, "Memory Technology Registers:\n");

	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		max_branch = 1;
		max_channel = 1;
	} else {
		max_branch = MAX_BRANCHES;
		max_channel = MAX_CH_PER_BRANCH;
	}

	/* Get the AMB present registers for the four channels */
	for (branch = 0; branch < max_branch; branch++) {
		/* Read and dump this branch's AMB present registers */
		channel = to_channel(0, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_0,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);

		if (max_channel == 1)
			continue;

		channel = to_channel(1, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_1,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);
	}

	/* Get the set of MTR[0-7] regs by each branch */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		int where = mtr_regs[slot];
		for (branch = 0; branch < max_branch; branch++) {
			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
					     where,
					     &pvt->mtr[slot][branch]);
			for (ch = 0; ch < max_channel; ch++) {
				int channel = to_channel(ch, branch);

				dimm = edac_get_dimm(mci, branch, ch, slot);

				dinfo = &pvt->dimm_info[slot][channel];

				mtr = decode_mtr(pvt, slot, ch, branch,
						 dinfo, dimm);

				/* if no DIMMs on this row, continue */
				if (!MTR_DIMMS_PRESENT(mtr))
					continue;

				rc = 0;

			}
		}
	}

	return rc;
}

/**
 * decode_mir() - Decodes Memory Interleave Register (MIR) info
 * @mir_no:	number of the MIR register to decode
 * @mir:	array with the MIR data cached on the driver
 */
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
	if (mir[mir_no] & 3)
		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
			 mir_no,
			 (mir[mir_no] >> 4) & 0xfff,
			 (mir[mir_no] & 1) ? "B0" : "",
			 (mir[mir_no] & 2) ? "B1" : "");
}
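
/*
 * Worked example (illustrative value): a MIR reading of 0x0103 would be
 * reported by decode_mir() as "limit= 0x10 Branch(es) that participate:
 * B0 B1", since bits [1:0] select the participating branches and bits
 * [15:4] hold the interleave limit field.
 */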
"enabled" : "disabled"); 877 878 /* Get Memory Interleave Range registers */ 879 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, 880 &pvt->mir[0]); 881 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1, 882 &pvt->mir[1]); 883 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2, 884 &pvt->mir[2]); 885 886 /* Decode the MIR regs */ 887 for (i = 0; i < MAX_MIR; i++) 888 decode_mir(i, pvt->mir); 889 890 rc = i7300_init_csrows(mci); 891 if (rc < 0) 892 return rc; 893 894 /* Go and determine the size of each DIMM and place in an 895 * orderly matrix */ 896 print_dimm_size(pvt); 897 898 return 0; 899 } 900 901 /************************************************* 902 * i7300 Functions related to device probe/release 903 *************************************************/ 904 905 /** 906 * i7300_put_devices() - Release the PCI devices 907 * @mci: struct mem_ctl_info pointer 908 */ 909 static void i7300_put_devices(struct mem_ctl_info *mci) 910 { 911 struct i7300_pvt *pvt; 912 int branch; 913 914 pvt = mci->pvt_info; 915 916 /* Decrement usage count for devices */ 917 for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++) 918 pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]); 919 pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs); 920 pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map); 921 } 922 923 /** 924 * i7300_get_devices() - Find and perform 'get' operation on the MCH's 925 * device/functions we want to reference for this driver 926 * @mci: struct mem_ctl_info pointer 927 * 928 * Access and prepare the several devices for usage: 929 * I7300 devices used by this driver: 930 * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 931 * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 932 * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 933 */ 934 static int i7300_get_devices(struct mem_ctl_info *mci) 935 { 936 struct i7300_pvt *pvt; 937 struct pci_dev *pdev; 938 939 pvt = mci->pvt_info; 940 941 /* Attempt to 'get' the MCH register we want */ 942 pdev = NULL; 943 while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 944 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, 945 pdev))) { 946 /* Store device 16 funcs 1 and 2 */ 947 switch (PCI_FUNC(pdev->devfn)) { 948 case 1: 949 if (!pvt->pci_dev_16_1_fsb_addr_map) 950 pvt->pci_dev_16_1_fsb_addr_map = 951 pci_dev_get(pdev); 952 break; 953 case 2: 954 if (!pvt->pci_dev_16_2_fsb_err_regs) 955 pvt->pci_dev_16_2_fsb_err_regs = 956 pci_dev_get(pdev); 957 break; 958 } 959 } 960 961 if (!pvt->pci_dev_16_1_fsb_addr_map || 962 !pvt->pci_dev_16_2_fsb_err_regs) { 963 /* At least one device was not found */ 964 i7300_printk(KERN_ERR, 965 "'system address,Process Bus' device not found:" 966 "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n", 967 PCI_VENDOR_ID_INTEL, 968 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); 969 goto error; 970 } 971 972 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", 973 pci_name(pvt->pci_dev_16_0_fsb_ctlr), 974 pvt->pci_dev_16_0_fsb_ctlr->vendor, 975 pvt->pci_dev_16_0_fsb_ctlr->device); 976 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 977 pci_name(pvt->pci_dev_16_1_fsb_addr_map), 978 pvt->pci_dev_16_1_fsb_addr_map->vendor, 979 pvt->pci_dev_16_1_fsb_addr_map->device); 980 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", 981 pci_name(pvt->pci_dev_16_2_fsb_err_regs), 982 pvt->pci_dev_16_2_fsb_err_regs->vendor, 983 pvt->pci_dev_16_2_fsb_err_regs->device); 984 985 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, 986 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, 987 NULL); 
	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
		i7300_printk(KERN_ERR,
			     "MC: 'BRANCH 0' device not found: "
			     "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
		goto error;
	}

	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
		i7300_printk(KERN_ERR,
			     "MC: 'BRANCH 1' device not found: "
			     "vendor 0x%x device 0x%x Func 0 "
			     "(broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
		goto error;
	}

	return 0;

error:
	i7300_put_devices(mci);
	return -ENODEV;
}

/**
 * i7300_init_one() - Probe for one instance of the device
 * @pdev: struct pci_dev pointer
 * @id: struct pci_device_id pointer - currently unused
 */
static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[3];
	struct i7300_pvt *pvt;
	int rc;

	/* wake up device */
	rc = pci_enable_device(pdev);
	if (rc == -EIO)
		return rc;

	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
		 pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We are only looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MAX_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MAX_CH_PER_BRANCH;
	layers[1].is_virt_csrow = true;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MAX_SLOTS;
	layers[2].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p\n", mci);

	mci->pdev = &pdev->dev;	/* record ptr to the generic device */

	pvt = mci->pvt_info;
	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private data */

	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pvt->tmp_prt_buffer) {
		edac_mc_free(mci);
		return -ENOMEM;
	}

	/* 'get' the pci devices we want to reserve for our use */
	if (i7300_get_devices(mci))
		goto fail0;

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7300_edac.c";
	mci->ctl_name = i7300_devs[0].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7300_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i7300_get_mc_regs(mci)) {
		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		edac_dbg(1, "MC: Enable error reporting now\n");
		i7300_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	i7300_clear_error(mci);

	/* allocating generic PCI control info */
	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i7300_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:

	i7300_put_devices(mci);

fail0:
	kfree(pvt->tmp_prt_buffer);
	edac_mc_free(mci);
	return -ENODEV;
}

/**
 * i7300_remove_one() - Remove the driver
 * @pdev: struct pci_dev pointer
 */
static void i7300_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	char *tmp;

	edac_dbg(0, "\n");

	if (i7300_pci)
		edac_pci_release_generic_ctl(i7300_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;

	/* retrieve references to resources, and free those resources */
	i7300_put_devices(mci);

	kfree(tmp);
	edac_mc_free(mci);
}

/*
 * pci_device_id: table for which devices we are looking for
 *
 * Has only 8086:360c PCI ID
 */
static const struct pci_device_id i7300_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);

/*
 * i7300_driver: pci_driver structure for this module
 */
static struct pci_driver i7300_driver = {
	.name = "i7300_edac",
	.probe = i7300_init_one,
	.remove = i7300_remove_one,
	.id_table = i7300_pci_tbl,
};

/**
 * i7300_init() - Registers the driver
 */
static int __init i7300_init(void)
{
	int pci_rc;

	edac_dbg(2, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i7300_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

/**
 * i7300_exit() - Unregisters the driver
 */
static void __exit i7300_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&i7300_driver);
}

module_init(i7300_init);
module_exit(i7300_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
		   I7300_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
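
/*
 * Usage note (assuming the standard EDAC core sysfs layout): once this
 * module is loaded on an i7300 based system, the memory controller is
 * exposed under /sys/devices/system/edac/mc/mc0/, where counters such as
 * ce_count and ue_count can be monitored. The reporting mode can be chosen
 * at load time, e.g.:
 *	modprobe i7300_edac edac_op_state=0	(polling mode)
 */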