1 /* 2 * edac_mc kernel module 3 * (C) 2005, 2006 Linux Networx (http://lnxi.com) 4 * This file may be distributed under the terms of the 5 * GNU General Public License. 6 * 7 * Written by Thayne Harbaugh 8 * Based on work by Dan Hollis <goemon at anime dot net> and others. 9 * http://www.anime.net/~goemon/linux-ecc/ 10 * 11 * Modified by Dave Peterson and Doug Thompson 12 * 13 */ 14 15 #include <linux/module.h> 16 #include <linux/proc_fs.h> 17 #include <linux/kernel.h> 18 #include <linux/types.h> 19 #include <linux/smp.h> 20 #include <linux/init.h> 21 #include <linux/sysctl.h> 22 #include <linux/highmem.h> 23 #include <linux/timer.h> 24 #include <linux/slab.h> 25 #include <linux/jiffies.h> 26 #include <linux/spinlock.h> 27 #include <linux/list.h> 28 #include <linux/ctype.h> 29 #include <linux/edac.h> 30 #include <linux/bitops.h> 31 #include <linux/uaccess.h> 32 #include <asm/page.h> 33 #include "edac_mc.h" 34 #include "edac_module.h" 35 #include <ras/ras_event.h> 36 37 #ifdef CONFIG_EDAC_ATOMIC_SCRUB 38 #include <asm/edac.h> 39 #else 40 #define edac_atomic_scrub(va, size) do { } while (0) 41 #endif 42 43 /* lock to memory controller's control array */ 44 static DEFINE_MUTEX(mem_ctls_mutex); 45 static LIST_HEAD(mc_devices); 46 47 /* 48 * Used to lock EDAC MC to just one module, avoiding two drivers e. g. 49 * apei/ghes and i7core_edac to be used at the same time. 50 */ 51 static void const *edac_mc_owner; 52 53 static struct bus_type mc_bus[EDAC_MAX_MCS]; 54 55 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, 56 unsigned len) 57 { 58 struct mem_ctl_info *mci = dimm->mci; 59 int i, n, count = 0; 60 char *p = buf; 61 62 for (i = 0; i < mci->n_layers; i++) { 63 n = snprintf(p, len, "%s %d ", 64 edac_layer_name[mci->layers[i].type], 65 dimm->location[i]); 66 p += n; 67 len -= n; 68 count += n; 69 if (!len) 70 break; 71 } 72 73 return count; 74 } 75 76 #ifdef CONFIG_EDAC_DEBUG 77 78 static void edac_mc_dump_channel(struct rank_info *chan) 79 { 80 edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx); 81 edac_dbg(4, " channel = %p\n", chan); 82 edac_dbg(4, " channel->csrow = %p\n", chan->csrow); 83 edac_dbg(4, " channel->dimm = %p\n", chan->dimm); 84 } 85 86 static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) 87 { 88 char location[80]; 89 90 edac_dimm_info_location(dimm, location, sizeof(location)); 91 92 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", 93 dimm->mci->csbased ? "rank" : "dimm", 94 number, location, dimm->csrow, dimm->cschannel); 95 edac_dbg(4, " dimm = %p\n", dimm); 96 edac_dbg(4, " dimm->label = '%s'\n", dimm->label); 97 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); 98 edac_dbg(4, " dimm->grain = %d\n", dimm->grain); 99 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); 100 } 101 102 static void edac_mc_dump_csrow(struct csrow_info *csrow) 103 { 104 edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx); 105 edac_dbg(4, " csrow = %p\n", csrow); 106 edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page); 107 edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page); 108 edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask); 109 edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels); 110 edac_dbg(4, " csrow->channels = %p\n", csrow->channels); 111 edac_dbg(4, " csrow->mci = %p\n", csrow->mci); 112 } 113 114 static void edac_mc_dump_mci(struct mem_ctl_info *mci) 115 { 116 edac_dbg(3, "\tmci = %p\n", mci); 117 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); 118 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); 119 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); 120 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); 121 edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n", 122 mci->nr_csrows, mci->csrows); 123 edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n", 124 mci->tot_dimms, mci->dimms); 125 edac_dbg(3, "\tdev = %p\n", mci->pdev); 126 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", 127 mci->mod_name, mci->ctl_name); 128 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); 129 } 130 131 #endif /* CONFIG_EDAC_DEBUG */ 132 133 const char * const edac_mem_types[] = { 134 [MEM_EMPTY] = "Empty csrow", 135 [MEM_RESERVED] = "Reserved csrow type", 136 [MEM_UNKNOWN] = "Unknown csrow type", 137 [MEM_FPM] = "Fast page mode RAM", 138 [MEM_EDO] = "Extended data out RAM", 139 [MEM_BEDO] = "Burst Extended data out RAM", 140 [MEM_SDR] = "Single data rate SDRAM", 141 [MEM_RDR] = "Registered single data rate SDRAM", 142 [MEM_DDR] = "Double data rate SDRAM", 143 [MEM_RDDR] = "Registered Double data rate SDRAM", 144 [MEM_RMBS] = "Rambus DRAM", 145 [MEM_DDR2] = "Unbuffered DDR2 RAM", 146 [MEM_FB_DDR2] = "Fully buffered DDR2", 147 [MEM_RDDR2] = "Registered DDR2 RAM", 148 [MEM_XDR] = "Rambus XDR", 149 [MEM_DDR3] = "Unbuffered DDR3 RAM", 150 [MEM_RDDR3] = "Registered DDR3 RAM", 151 [MEM_LRDDR3] = "Load-Reduced DDR3 RAM", 152 [MEM_DDR4] = "Unbuffered DDR4 RAM", 153 [MEM_RDDR4] = "Registered DDR4 RAM", 154 }; 155 EXPORT_SYMBOL_GPL(edac_mem_types); 156 157 /** 158 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation 159 * @p: pointer to a pointer with the memory offset to be used. At 160 * return, this will be incremented to point to the next offset 161 * @size: Size of the data structure to be reserved 162 * @n_elems: Number of elements that should be reserved 163 * 164 * If 'size' is a constant, the compiler will optimize this whole function 165 * down to either a no-op or the addition of a constant to the value of '*p'. 166 * 167 * The 'p' pointer is absolutely needed to keep the proper advancing 168 * further in memory to the proper offsets when allocating the struct along 169 * with its embedded structs, as edac_device_alloc_ctl_info() does it 170 * above, for example. 171 * 172 * At return, the pointer 'p' will be incremented to be used on a next call 173 * to this function. 174 */ 175 void *edac_align_ptr(void **p, unsigned size, int n_elems) 176 { 177 unsigned align, r; 178 void *ptr = *p; 179 180 *p += size * n_elems; 181 182 /* 183 * 'p' can possibly be an unaligned item X such that sizeof(X) is 184 * 'size'. Adjust 'p' so that its alignment is at least as 185 * stringent as what the compiler would provide for X and return 186 * the aligned result. 187 * Here we assume that the alignment of a "long long" is the most 188 * stringent alignment that the compiler will ever provide by default. 189 * As far as I know, this is a reasonable assumption. 190 */ 191 if (size > sizeof(long)) 192 align = sizeof(long long); 193 else if (size > sizeof(int)) 194 align = sizeof(long); 195 else if (size > sizeof(short)) 196 align = sizeof(int); 197 else if (size > sizeof(char)) 198 align = sizeof(short); 199 else 200 return (char *)ptr; 201 202 r = (unsigned long)p % align; 203 204 if (r == 0) 205 return (char *)ptr; 206 207 *p += align - r; 208 209 return (void *)(((unsigned long)ptr) + align - r); 210 } 211 212 static void _edac_mc_free(struct mem_ctl_info *mci) 213 { 214 int i, chn, row; 215 struct csrow_info *csr; 216 const unsigned int tot_dimms = mci->tot_dimms; 217 const unsigned int tot_channels = mci->num_cschannel; 218 const unsigned int tot_csrows = mci->nr_csrows; 219 220 if (mci->dimms) { 221 for (i = 0; i < tot_dimms; i++) 222 kfree(mci->dimms[i]); 223 kfree(mci->dimms); 224 } 225 if (mci->csrows) { 226 for (row = 0; row < tot_csrows; row++) { 227 csr = mci->csrows[row]; 228 if (csr) { 229 if (csr->channels) { 230 for (chn = 0; chn < tot_channels; chn++) 231 kfree(csr->channels[chn]); 232 kfree(csr->channels); 233 } 234 kfree(csr); 235 } 236 } 237 kfree(mci->csrows); 238 } 239 kfree(mci); 240 } 241 242 struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, 243 unsigned n_layers, 244 struct edac_mc_layer *layers, 245 unsigned sz_pvt) 246 { 247 struct mem_ctl_info *mci; 248 struct edac_mc_layer *layer; 249 struct csrow_info *csr; 250 struct rank_info *chan; 251 struct dimm_info *dimm; 252 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; 253 unsigned pos[EDAC_MAX_LAYERS]; 254 unsigned size, tot_dimms = 1, count = 1; 255 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; 256 void *pvt, *p, *ptr = NULL; 257 int i, j, row, chn, n, len, off; 258 bool per_rank = false; 259 260 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); 261 /* 262 * Calculate the total amount of dimms and csrows/cschannels while 263 * in the old API emulation mode 264 */ 265 for (i = 0; i < n_layers; i++) { 266 tot_dimms *= layers[i].size; 267 if (layers[i].is_virt_csrow) 268 tot_csrows *= layers[i].size; 269 else 270 tot_channels *= layers[i].size; 271 272 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT) 273 per_rank = true; 274 } 275 276 /* Figure out the offsets of the various items from the start of an mc 277 * structure. We want the alignment of each item to be at least as 278 * stringent as what the compiler would provide if we could simply 279 * hardcode everything into a single struct. 280 */ 281 mci = edac_align_ptr(&ptr, sizeof(*mci), 1); 282 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); 283 for (i = 0; i < n_layers; i++) { 284 count *= layers[i].size; 285 edac_dbg(4, "errcount layer %d size %d\n", i, count); 286 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 287 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 288 tot_errcount += 2 * count; 289 } 290 291 edac_dbg(4, "allocating %d error counters\n", tot_errcount); 292 pvt = edac_align_ptr(&ptr, sz_pvt, 1); 293 size = ((unsigned long)pvt) + sz_pvt; 294 295 edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", 296 size, 297 tot_dimms, 298 per_rank ? "ranks" : "dimms", 299 tot_csrows * tot_channels); 300 301 mci = kzalloc(size, GFP_KERNEL); 302 if (mci == NULL) 303 return NULL; 304 305 /* Adjust pointers so they point within the memory we just allocated 306 * rather than an imaginary chunk of memory located at address 0. 307 */ 308 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); 309 for (i = 0; i < n_layers; i++) { 310 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); 311 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); 312 } 313 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; 314 315 /* setup index and various internal pointers */ 316 mci->mc_idx = mc_num; 317 mci->tot_dimms = tot_dimms; 318 mci->pvt_info = pvt; 319 mci->n_layers = n_layers; 320 mci->layers = layer; 321 memcpy(mci->layers, layers, sizeof(*layer) * n_layers); 322 mci->nr_csrows = tot_csrows; 323 mci->num_cschannel = tot_channels; 324 mci->csbased = per_rank; 325 326 /* 327 * Alocate and fill the csrow/channels structs 328 */ 329 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); 330 if (!mci->csrows) 331 goto error; 332 for (row = 0; row < tot_csrows; row++) { 333 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); 334 if (!csr) 335 goto error; 336 mci->csrows[row] = csr; 337 csr->csrow_idx = row; 338 csr->mci = mci; 339 csr->nr_channels = tot_channels; 340 csr->channels = kcalloc(tot_channels, sizeof(*csr->channels), 341 GFP_KERNEL); 342 if (!csr->channels) 343 goto error; 344 345 for (chn = 0; chn < tot_channels; chn++) { 346 chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); 347 if (!chan) 348 goto error; 349 csr->channels[chn] = chan; 350 chan->chan_idx = chn; 351 chan->csrow = csr; 352 } 353 } 354 355 /* 356 * Allocate and fill the dimm structs 357 */ 358 mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); 359 if (!mci->dimms) 360 goto error; 361 362 memset(&pos, 0, sizeof(pos)); 363 row = 0; 364 chn = 0; 365 for (i = 0; i < tot_dimms; i++) { 366 chan = mci->csrows[row]->channels[chn]; 367 off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]); 368 if (off < 0 || off >= tot_dimms) { 369 edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n"); 370 goto error; 371 } 372 373 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); 374 if (!dimm) 375 goto error; 376 mci->dimms[off] = dimm; 377 dimm->mci = mci; 378 379 /* 380 * Copy DIMM location and initialize it. 381 */ 382 len = sizeof(dimm->label); 383 p = dimm->label; 384 n = snprintf(p, len, "mc#%u", mc_num); 385 p += n; 386 len -= n; 387 for (j = 0; j < n_layers; j++) { 388 n = snprintf(p, len, "%s#%u", 389 edac_layer_name[layers[j].type], 390 pos[j]); 391 p += n; 392 len -= n; 393 dimm->location[j] = pos[j]; 394 395 if (len <= 0) 396 break; 397 } 398 399 /* Link it to the csrows old API data */ 400 chan->dimm = dimm; 401 dimm->csrow = row; 402 dimm->cschannel = chn; 403 404 /* Increment csrow location */ 405 if (layers[0].is_virt_csrow) { 406 chn++; 407 if (chn == tot_channels) { 408 chn = 0; 409 row++; 410 } 411 } else { 412 row++; 413 if (row == tot_csrows) { 414 row = 0; 415 chn++; 416 } 417 } 418 419 /* Increment dimm location */ 420 for (j = n_layers - 1; j >= 0; j--) { 421 pos[j]++; 422 if (pos[j] < layers[j].size) 423 break; 424 pos[j] = 0; 425 } 426 } 427 428 mci->op_state = OP_ALLOC; 429 430 return mci; 431 432 error: 433 _edac_mc_free(mci); 434 435 return NULL; 436 } 437 EXPORT_SYMBOL_GPL(edac_mc_alloc); 438 439 void edac_mc_free(struct mem_ctl_info *mci) 440 { 441 edac_dbg(1, "\n"); 442 443 /* If we're not yet registered with sysfs free only what was allocated 444 * in edac_mc_alloc(). 445 */ 446 if (!device_is_registered(&mci->dev)) { 447 _edac_mc_free(mci); 448 return; 449 } 450 451 /* the mci instance is freed here, when the sysfs object is dropped */ 452 edac_unregister_sysfs(mci); 453 } 454 EXPORT_SYMBOL_GPL(edac_mc_free); 455 456 /* Caller must hold mem_ctls_mutex */ 457 static struct mem_ctl_info *__find_mci_by_dev(struct device *dev) 458 { 459 struct mem_ctl_info *mci; 460 struct list_head *item; 461 462 edac_dbg(3, "\n"); 463 464 list_for_each(item, &mc_devices) { 465 mci = list_entry(item, struct mem_ctl_info, link); 466 467 if (mci->pdev == dev) 468 return mci; 469 } 470 471 return NULL; 472 } 473 474 /** 475 * find_mci_by_dev 476 * 477 * scan list of controllers looking for the one that manages 478 * the 'dev' device 479 * @dev: pointer to a struct device related with the MCI 480 */ 481 struct mem_ctl_info *find_mci_by_dev(struct device *dev) 482 { 483 struct mem_ctl_info *ret; 484 485 mutex_lock(&mem_ctls_mutex); 486 ret = __find_mci_by_dev(dev); 487 mutex_unlock(&mem_ctls_mutex); 488 489 return ret; 490 } 491 EXPORT_SYMBOL_GPL(find_mci_by_dev); 492 493 /* 494 * handler for EDAC to check if NMI type handler has asserted interrupt 495 */ 496 static int edac_mc_assert_error_check_and_clear(void) 497 { 498 int old_state; 499 500 if (edac_op_state == EDAC_OPSTATE_POLL) 501 return 1; 502 503 old_state = edac_err_assert; 504 edac_err_assert = 0; 505 506 return old_state; 507 } 508 509 /* 510 * edac_mc_workq_function 511 * performs the operation scheduled by a workq request 512 */ 513 static void edac_mc_workq_function(struct work_struct *work_req) 514 { 515 struct delayed_work *d_work = to_delayed_work(work_req); 516 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); 517 518 mutex_lock(&mem_ctls_mutex); 519 520 if (mci->op_state != OP_RUNNING_POLL) { 521 mutex_unlock(&mem_ctls_mutex); 522 return; 523 } 524 525 if (edac_mc_assert_error_check_and_clear()) 526 mci->edac_check(mci); 527 528 mutex_unlock(&mem_ctls_mutex); 529 530 /* Queue ourselves again. */ 531 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); 532 } 533 534 /* 535 * edac_mc_reset_delay_period(unsigned long value) 536 * 537 * user space has updated our poll period value, need to 538 * reset our workq delays 539 */ 540 void edac_mc_reset_delay_period(unsigned long value) 541 { 542 struct mem_ctl_info *mci; 543 struct list_head *item; 544 545 mutex_lock(&mem_ctls_mutex); 546 547 list_for_each(item, &mc_devices) { 548 mci = list_entry(item, struct mem_ctl_info, link); 549 550 if (mci->op_state == OP_RUNNING_POLL) 551 edac_mod_work(&mci->work, value); 552 } 553 mutex_unlock(&mem_ctls_mutex); 554 } 555 556 557 558 /* Return 0 on success, 1 on failure. 559 * Before calling this function, caller must 560 * assign a unique value to mci->mc_idx. 561 * 562 * locking model: 563 * 564 * called with the mem_ctls_mutex lock held 565 */ 566 static int add_mc_to_global_list(struct mem_ctl_info *mci) 567 { 568 struct list_head *item, *insert_before; 569 struct mem_ctl_info *p; 570 571 insert_before = &mc_devices; 572 573 p = __find_mci_by_dev(mci->pdev); 574 if (unlikely(p != NULL)) 575 goto fail0; 576 577 list_for_each(item, &mc_devices) { 578 p = list_entry(item, struct mem_ctl_info, link); 579 580 if (p->mc_idx >= mci->mc_idx) { 581 if (unlikely(p->mc_idx == mci->mc_idx)) 582 goto fail1; 583 584 insert_before = item; 585 break; 586 } 587 } 588 589 list_add_tail_rcu(&mci->link, insert_before); 590 atomic_inc(&edac_handlers); 591 return 0; 592 593 fail0: 594 edac_printk(KERN_WARNING, EDAC_MC, 595 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), 596 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); 597 return 1; 598 599 fail1: 600 edac_printk(KERN_WARNING, EDAC_MC, 601 "bug in low-level driver: attempt to assign\n" 602 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); 603 return 1; 604 } 605 606 static int del_mc_from_global_list(struct mem_ctl_info *mci) 607 { 608 int handlers = atomic_dec_return(&edac_handlers); 609 list_del_rcu(&mci->link); 610 611 /* these are for safe removal of devices from global list while 612 * NMI handlers may be traversing list 613 */ 614 synchronize_rcu(); 615 INIT_LIST_HEAD(&mci->link); 616 617 return handlers; 618 } 619 620 struct mem_ctl_info *edac_mc_find(int idx) 621 { 622 struct mem_ctl_info *mci = NULL; 623 struct list_head *item; 624 625 mutex_lock(&mem_ctls_mutex); 626 627 list_for_each(item, &mc_devices) { 628 mci = list_entry(item, struct mem_ctl_info, link); 629 630 if (mci->mc_idx >= idx) { 631 if (mci->mc_idx == idx) { 632 goto unlock; 633 } 634 break; 635 } 636 } 637 638 unlock: 639 mutex_unlock(&mem_ctls_mutex); 640 return mci; 641 } 642 EXPORT_SYMBOL(edac_mc_find); 643 644 645 /* FIXME - should a warning be printed if no error detection? correction? */ 646 int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, 647 const struct attribute_group **groups) 648 { 649 int ret = -EINVAL; 650 edac_dbg(0, "\n"); 651 652 if (mci->mc_idx >= EDAC_MAX_MCS) { 653 pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx); 654 return -ENODEV; 655 } 656 657 #ifdef CONFIG_EDAC_DEBUG 658 if (edac_debug_level >= 3) 659 edac_mc_dump_mci(mci); 660 661 if (edac_debug_level >= 4) { 662 int i; 663 664 for (i = 0; i < mci->nr_csrows; i++) { 665 struct csrow_info *csrow = mci->csrows[i]; 666 u32 nr_pages = 0; 667 int j; 668 669 for (j = 0; j < csrow->nr_channels; j++) 670 nr_pages += csrow->channels[j]->dimm->nr_pages; 671 if (!nr_pages) 672 continue; 673 edac_mc_dump_csrow(csrow); 674 for (j = 0; j < csrow->nr_channels; j++) 675 if (csrow->channels[j]->dimm->nr_pages) 676 edac_mc_dump_channel(csrow->channels[j]); 677 } 678 for (i = 0; i < mci->tot_dimms; i++) 679 if (mci->dimms[i]->nr_pages) 680 edac_mc_dump_dimm(mci->dimms[i], i); 681 } 682 #endif 683 mutex_lock(&mem_ctls_mutex); 684 685 if (edac_mc_owner && edac_mc_owner != mci->mod_name) { 686 ret = -EPERM; 687 goto fail0; 688 } 689 690 if (add_mc_to_global_list(mci)) 691 goto fail0; 692 693 /* set load time so that error rate can be tracked */ 694 mci->start_time = jiffies; 695 696 mci->bus = &mc_bus[mci->mc_idx]; 697 698 if (edac_create_sysfs_mci_device(mci, groups)) { 699 edac_mc_printk(mci, KERN_WARNING, 700 "failed to create sysfs device\n"); 701 goto fail1; 702 } 703 704 if (mci->edac_check) { 705 mci->op_state = OP_RUNNING_POLL; 706 707 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); 708 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); 709 710 } else { 711 mci->op_state = OP_RUNNING_INTERRUPT; 712 } 713 714 /* Report action taken */ 715 edac_mc_printk(mci, KERN_INFO, 716 "Giving out device to module %s controller %s: DEV %s (%s)\n", 717 mci->mod_name, mci->ctl_name, mci->dev_name, 718 edac_op_state_to_string(mci->op_state)); 719 720 edac_mc_owner = mci->mod_name; 721 722 mutex_unlock(&mem_ctls_mutex); 723 return 0; 724 725 fail1: 726 del_mc_from_global_list(mci); 727 728 fail0: 729 mutex_unlock(&mem_ctls_mutex); 730 return ret; 731 } 732 EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups); 733 734 struct mem_ctl_info *edac_mc_del_mc(struct device *dev) 735 { 736 struct mem_ctl_info *mci; 737 738 edac_dbg(0, "\n"); 739 740 mutex_lock(&mem_ctls_mutex); 741 742 /* find the requested mci struct in the global list */ 743 mci = __find_mci_by_dev(dev); 744 if (mci == NULL) { 745 mutex_unlock(&mem_ctls_mutex); 746 return NULL; 747 } 748 749 /* mark MCI offline: */ 750 mci->op_state = OP_OFFLINE; 751 752 if (!del_mc_from_global_list(mci)) 753 edac_mc_owner = NULL; 754 755 mutex_unlock(&mem_ctls_mutex); 756 757 if (mci->edac_check) 758 edac_stop_work(&mci->work); 759 760 /* remove from sysfs */ 761 edac_remove_sysfs_mci_device(mci); 762 763 edac_printk(KERN_INFO, EDAC_MC, 764 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, 765 mci->mod_name, mci->ctl_name, edac_dev_name(mci)); 766 767 return mci; 768 } 769 EXPORT_SYMBOL_GPL(edac_mc_del_mc); 770 771 static void edac_mc_scrub_block(unsigned long page, unsigned long offset, 772 u32 size) 773 { 774 struct page *pg; 775 void *virt_addr; 776 unsigned long flags = 0; 777 778 edac_dbg(3, "\n"); 779 780 /* ECC error page was not in our memory. Ignore it. */ 781 if (!pfn_valid(page)) 782 return; 783 784 /* Find the actual page structure then map it and fix */ 785 pg = pfn_to_page(page); 786 787 if (PageHighMem(pg)) 788 local_irq_save(flags); 789 790 virt_addr = kmap_atomic(pg); 791 792 /* Perform architecture specific atomic scrub operation */ 793 edac_atomic_scrub(virt_addr + offset, size); 794 795 /* Unmap and complete */ 796 kunmap_atomic(virt_addr); 797 798 if (PageHighMem(pg)) 799 local_irq_restore(flags); 800 } 801 802 /* FIXME - should return -1 */ 803 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 804 { 805 struct csrow_info **csrows = mci->csrows; 806 int row, i, j, n; 807 808 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); 809 row = -1; 810 811 for (i = 0; i < mci->nr_csrows; i++) { 812 struct csrow_info *csrow = csrows[i]; 813 n = 0; 814 for (j = 0; j < csrow->nr_channels; j++) { 815 struct dimm_info *dimm = csrow->channels[j]->dimm; 816 n += dimm->nr_pages; 817 } 818 if (n == 0) 819 continue; 820 821 edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", 822 mci->mc_idx, 823 csrow->first_page, page, csrow->last_page, 824 csrow->page_mask); 825 826 if ((page >= csrow->first_page) && 827 (page <= csrow->last_page) && 828 ((page & csrow->page_mask) == 829 (csrow->first_page & csrow->page_mask))) { 830 row = i; 831 break; 832 } 833 } 834 835 if (row == -1) 836 edac_mc_printk(mci, KERN_ERR, 837 "could not look up page error address %lx\n", 838 (unsigned long)page); 839 840 return row; 841 } 842 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); 843 844 const char *edac_layer_name[] = { 845 [EDAC_MC_LAYER_BRANCH] = "branch", 846 [EDAC_MC_LAYER_CHANNEL] = "channel", 847 [EDAC_MC_LAYER_SLOT] = "slot", 848 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", 849 [EDAC_MC_LAYER_ALL_MEM] = "memory", 850 }; 851 EXPORT_SYMBOL_GPL(edac_layer_name); 852 853 static void edac_inc_ce_error(struct mem_ctl_info *mci, 854 bool enable_per_layer_report, 855 const int pos[EDAC_MAX_LAYERS], 856 const u16 count) 857 { 858 int i, index = 0; 859 860 mci->ce_mc += count; 861 862 if (!enable_per_layer_report) { 863 mci->ce_noinfo_count += count; 864 return; 865 } 866 867 for (i = 0; i < mci->n_layers; i++) { 868 if (pos[i] < 0) 869 break; 870 index += pos[i]; 871 mci->ce_per_layer[i][index] += count; 872 873 if (i < mci->n_layers - 1) 874 index *= mci->layers[i + 1].size; 875 } 876 } 877 878 static void edac_inc_ue_error(struct mem_ctl_info *mci, 879 bool enable_per_layer_report, 880 const int pos[EDAC_MAX_LAYERS], 881 const u16 count) 882 { 883 int i, index = 0; 884 885 mci->ue_mc += count; 886 887 if (!enable_per_layer_report) { 888 mci->ue_noinfo_count += count; 889 return; 890 } 891 892 for (i = 0; i < mci->n_layers; i++) { 893 if (pos[i] < 0) 894 break; 895 index += pos[i]; 896 mci->ue_per_layer[i][index] += count; 897 898 if (i < mci->n_layers - 1) 899 index *= mci->layers[i + 1].size; 900 } 901 } 902 903 static void edac_ce_error(struct mem_ctl_info *mci, 904 const u16 error_count, 905 const int pos[EDAC_MAX_LAYERS], 906 const char *msg, 907 const char *location, 908 const char *label, 909 const char *detail, 910 const char *other_detail, 911 const bool enable_per_layer_report, 912 const unsigned long page_frame_number, 913 const unsigned long offset_in_page, 914 long grain) 915 { 916 unsigned long remapped_page; 917 char *msg_aux = ""; 918 919 if (*msg) 920 msg_aux = " "; 921 922 if (edac_mc_get_log_ce()) { 923 if (other_detail && *other_detail) 924 edac_mc_printk(mci, KERN_WARNING, 925 "%d CE %s%son %s (%s %s - %s)\n", 926 error_count, msg, msg_aux, label, 927 location, detail, other_detail); 928 else 929 edac_mc_printk(mci, KERN_WARNING, 930 "%d CE %s%son %s (%s %s)\n", 931 error_count, msg, msg_aux, label, 932 location, detail); 933 } 934 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); 935 936 if (mci->scrub_mode == SCRUB_SW_SRC) { 937 /* 938 * Some memory controllers (called MCs below) can remap 939 * memory so that it is still available at a different 940 * address when PCI devices map into memory. 941 * MC's that can't do this, lose the memory where PCI 942 * devices are mapped. This mapping is MC-dependent 943 * and so we call back into the MC driver for it to 944 * map the MC page to a physical (CPU) page which can 945 * then be mapped to a virtual page - which can then 946 * be scrubbed. 947 */ 948 remapped_page = mci->ctl_page_to_phys ? 949 mci->ctl_page_to_phys(mci, page_frame_number) : 950 page_frame_number; 951 952 edac_mc_scrub_block(remapped_page, 953 offset_in_page, grain); 954 } 955 } 956 957 static void edac_ue_error(struct mem_ctl_info *mci, 958 const u16 error_count, 959 const int pos[EDAC_MAX_LAYERS], 960 const char *msg, 961 const char *location, 962 const char *label, 963 const char *detail, 964 const char *other_detail, 965 const bool enable_per_layer_report) 966 { 967 char *msg_aux = ""; 968 969 if (*msg) 970 msg_aux = " "; 971 972 if (edac_mc_get_log_ue()) { 973 if (other_detail && *other_detail) 974 edac_mc_printk(mci, KERN_WARNING, 975 "%d UE %s%son %s (%s %s - %s)\n", 976 error_count, msg, msg_aux, label, 977 location, detail, other_detail); 978 else 979 edac_mc_printk(mci, KERN_WARNING, 980 "%d UE %s%son %s (%s %s)\n", 981 error_count, msg, msg_aux, label, 982 location, detail); 983 } 984 985 if (edac_mc_get_panic_on_ue()) { 986 if (other_detail && *other_detail) 987 panic("UE %s%son %s (%s%s - %s)\n", 988 msg, msg_aux, label, location, detail, other_detail); 989 else 990 panic("UE %s%son %s (%s%s)\n", 991 msg, msg_aux, label, location, detail); 992 } 993 994 edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); 995 } 996 997 void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type, 998 struct mem_ctl_info *mci, 999 struct edac_raw_error_desc *e) 1000 { 1001 char detail[80]; 1002 int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; 1003 1004 /* Memory type dependent details about the error */ 1005 if (type == HW_EVENT_ERR_CORRECTED) { 1006 snprintf(detail, sizeof(detail), 1007 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", 1008 e->page_frame_number, e->offset_in_page, 1009 e->grain, e->syndrome); 1010 edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label, 1011 detail, e->other_detail, e->enable_per_layer_report, 1012 e->page_frame_number, e->offset_in_page, e->grain); 1013 } else { 1014 snprintf(detail, sizeof(detail), 1015 "page:0x%lx offset:0x%lx grain:%ld", 1016 e->page_frame_number, e->offset_in_page, e->grain); 1017 1018 edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label, 1019 detail, e->other_detail, e->enable_per_layer_report); 1020 } 1021 1022 1023 } 1024 EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error); 1025 1026 void edac_mc_handle_error(const enum hw_event_mc_err_type type, 1027 struct mem_ctl_info *mci, 1028 const u16 error_count, 1029 const unsigned long page_frame_number, 1030 const unsigned long offset_in_page, 1031 const unsigned long syndrome, 1032 const int top_layer, 1033 const int mid_layer, 1034 const int low_layer, 1035 const char *msg, 1036 const char *other_detail) 1037 { 1038 char *p; 1039 int row = -1, chan = -1; 1040 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; 1041 int i, n_labels = 0; 1042 u8 grain_bits; 1043 struct edac_raw_error_desc *e = &mci->error_desc; 1044 1045 edac_dbg(3, "MC%d\n", mci->mc_idx); 1046 1047 /* Fills the error report buffer */ 1048 memset(e, 0, sizeof (*e)); 1049 e->error_count = error_count; 1050 e->top_layer = top_layer; 1051 e->mid_layer = mid_layer; 1052 e->low_layer = low_layer; 1053 e->page_frame_number = page_frame_number; 1054 e->offset_in_page = offset_in_page; 1055 e->syndrome = syndrome; 1056 e->msg = msg; 1057 e->other_detail = other_detail; 1058 1059 /* 1060 * Check if the event report is consistent and if the memory 1061 * location is known. If it is known, enable_per_layer_report will be 1062 * true, the DIMM(s) label info will be filled and the per-layer 1063 * error counters will be incremented. 1064 */ 1065 for (i = 0; i < mci->n_layers; i++) { 1066 if (pos[i] >= (int)mci->layers[i].size) { 1067 1068 edac_mc_printk(mci, KERN_ERR, 1069 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", 1070 edac_layer_name[mci->layers[i].type], 1071 pos[i], mci->layers[i].size); 1072 /* 1073 * Instead of just returning it, let's use what's 1074 * known about the error. The increment routines and 1075 * the DIMM filter logic will do the right thing by 1076 * pointing the likely damaged DIMMs. 1077 */ 1078 pos[i] = -1; 1079 } 1080 if (pos[i] >= 0) 1081 e->enable_per_layer_report = true; 1082 } 1083 1084 /* 1085 * Get the dimm label/grain that applies to the match criteria. 1086 * As the error algorithm may not be able to point to just one memory 1087 * stick, the logic here will get all possible labels that could 1088 * pottentially be affected by the error. 1089 * On FB-DIMM memory controllers, for uncorrected errors, it is common 1090 * to have only the MC channel and the MC dimm (also called "branch") 1091 * but the channel is not known, as the memory is arranged in pairs, 1092 * where each memory belongs to a separate channel within the same 1093 * branch. 1094 */ 1095 p = e->label; 1096 *p = '\0'; 1097 1098 for (i = 0; i < mci->tot_dimms; i++) { 1099 struct dimm_info *dimm = mci->dimms[i]; 1100 1101 if (top_layer >= 0 && top_layer != dimm->location[0]) 1102 continue; 1103 if (mid_layer >= 0 && mid_layer != dimm->location[1]) 1104 continue; 1105 if (low_layer >= 0 && low_layer != dimm->location[2]) 1106 continue; 1107 1108 /* get the max grain, over the error match range */ 1109 if (dimm->grain > e->grain) 1110 e->grain = dimm->grain; 1111 1112 /* 1113 * If the error is memory-controller wide, there's no need to 1114 * seek for the affected DIMMs because the whole 1115 * channel/memory controller/... may be affected. 1116 * Also, don't show errors for empty DIMM slots. 1117 */ 1118 if (e->enable_per_layer_report && dimm->nr_pages) { 1119 if (n_labels >= EDAC_MAX_LABELS) { 1120 e->enable_per_layer_report = false; 1121 break; 1122 } 1123 n_labels++; 1124 if (p != e->label) { 1125 strcpy(p, OTHER_LABEL); 1126 p += strlen(OTHER_LABEL); 1127 } 1128 strcpy(p, dimm->label); 1129 p += strlen(p); 1130 *p = '\0'; 1131 1132 /* 1133 * get csrow/channel of the DIMM, in order to allow 1134 * incrementing the compat API counters 1135 */ 1136 edac_dbg(4, "%s csrows map: (%d,%d)\n", 1137 mci->csbased ? "rank" : "dimm", 1138 dimm->csrow, dimm->cschannel); 1139 if (row == -1) 1140 row = dimm->csrow; 1141 else if (row >= 0 && row != dimm->csrow) 1142 row = -2; 1143 1144 if (chan == -1) 1145 chan = dimm->cschannel; 1146 else if (chan >= 0 && chan != dimm->cschannel) 1147 chan = -2; 1148 } 1149 } 1150 1151 if (!e->enable_per_layer_report) { 1152 strcpy(e->label, "any memory"); 1153 } else { 1154 edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); 1155 if (p == e->label) 1156 strcpy(e->label, "unknown memory"); 1157 if (type == HW_EVENT_ERR_CORRECTED) { 1158 if (row >= 0) { 1159 mci->csrows[row]->ce_count += error_count; 1160 if (chan >= 0) 1161 mci->csrows[row]->channels[chan]->ce_count += error_count; 1162 } 1163 } else 1164 if (row >= 0) 1165 mci->csrows[row]->ue_count += error_count; 1166 } 1167 1168 /* Fill the RAM location data */ 1169 p = e->location; 1170 1171 for (i = 0; i < mci->n_layers; i++) { 1172 if (pos[i] < 0) 1173 continue; 1174 1175 p += sprintf(p, "%s:%d ", 1176 edac_layer_name[mci->layers[i].type], 1177 pos[i]); 1178 } 1179 if (p > e->location) 1180 *(p - 1) = '\0'; 1181 1182 /* Report the error via the trace interface */ 1183 grain_bits = fls_long(e->grain) + 1; 1184 trace_mc_event(type, e->msg, e->label, e->error_count, 1185 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, 1186 (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page, 1187 grain_bits, e->syndrome, e->other_detail); 1188 1189 edac_raw_mc_handle_error(type, mci, e); 1190 } 1191 EXPORT_SYMBOL_GPL(edac_mc_handle_error); 1192