xref: /openbmc/linux/drivers/edac/edac_mc.c (revision 64c70b1c)
1 /*
2  * edac_mc kernel module
3  * (C) 2005, 2006 Linux Networx (http://lnxi.com)
4  * This file may be distributed under the terms of the
5  * GNU General Public License.
6  *
7  * Written by Thayne Harbaugh
8  * Based on work by Dan Hollis <goemon at anime dot net> and others.
9  *	http://www.anime.net/~goemon/linux-ecc/
10  *
11  * Modified by Dave Peterson and Doug Thompson
12  *
13  */
14 
15 #include <linux/module.h>
16 #include <linux/proc_fs.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/sysctl.h>
22 #include <linux/highmem.h>
23 #include <linux/timer.h>
24 #include <linux/slab.h>
25 #include <linux/jiffies.h>
26 #include <linux/spinlock.h>
27 #include <linux/list.h>
28 #include <linux/sysdev.h>
29 #include <linux/ctype.h>
30 #include <linux/kthread.h>
31 #include <linux/freezer.h>
32 #include <asm/uaccess.h>
33 #include <asm/page.h>
34 #include <asm/edac.h>
35 #include "edac_mc.h"
36 
37 #define EDAC_MC_VERSION "Ver: 2.0.1 " __DATE__
38 
39 
40 #ifdef CONFIG_EDAC_DEBUG
41 /* Values of 0 to 4 will generate output */
42 int edac_debug_level = 1;
43 EXPORT_SYMBOL_GPL(edac_debug_level);
44 #endif
45 
46 /* EDAC Controls, settable by module parameter and sysfs */
47 static int log_ue = 1;
48 static int log_ce = 1;
49 static int panic_on_ue;
50 static int poll_msec = 1000;
51 
52 /* lock to memory controller's control array */
53 static DECLARE_MUTEX(mem_ctls_mutex);
54 static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
55 
56 static struct task_struct *edac_thread;
57 
58 #ifdef CONFIG_PCI
59 static int check_pci_parity = 0;	/* default NO check PCI parity */
60 static int panic_on_pci_parity;		/* default no panic on PCI Parity */
61 static atomic_t pci_parity_count = ATOMIC_INIT(0);
62 
63 static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */
64 static struct completion edac_pci_kobj_complete;
65 #endif	/* CONFIG_PCI */
66 
67 /*  START sysfs data and methods */
68 
69 
70 static const char *mem_types[] = {
71 	[MEM_EMPTY] = "Empty",
72 	[MEM_RESERVED] = "Reserved",
73 	[MEM_UNKNOWN] = "Unknown",
74 	[MEM_FPM] = "FPM",
75 	[MEM_EDO] = "EDO",
76 	[MEM_BEDO] = "BEDO",
77 	[MEM_SDR] = "Unbuffered-SDR",
78 	[MEM_RDR] = "Registered-SDR",
79 	[MEM_DDR] = "Unbuffered-DDR",
80 	[MEM_RDDR] = "Registered-DDR",
81 	[MEM_RMBS] = "RMBS"
82 };
83 
84 static const char *dev_types[] = {
85 	[DEV_UNKNOWN] = "Unknown",
86 	[DEV_X1] = "x1",
87 	[DEV_X2] = "x2",
88 	[DEV_X4] = "x4",
89 	[DEV_X8] = "x8",
90 	[DEV_X16] = "x16",
91 	[DEV_X32] = "x32",
92 	[DEV_X64] = "x64"
93 };
94 
95 static const char *edac_caps[] = {
96 	[EDAC_UNKNOWN] = "Unknown",
97 	[EDAC_NONE] = "None",
98 	[EDAC_RESERVED] = "Reserved",
99 	[EDAC_PARITY] = "PARITY",
100 	[EDAC_EC] = "EC",
101 	[EDAC_SECDED] = "SECDED",
102 	[EDAC_S2ECD2ED] = "S2ECD2ED",
103 	[EDAC_S4ECD4ED] = "S4ECD4ED",
104 	[EDAC_S8ECD8ED] = "S8ECD8ED",
105 	[EDAC_S16ECD16ED] = "S16ECD16ED"
106 };
107 
108 /* sysfs object: /sys/devices/system/edac */
109 static struct sysdev_class edac_class = {
110 	set_kset_name("edac"),
111 };
112 
113 /* sysfs object:
114  *	/sys/devices/system/edac/mc
115  */
116 static struct kobject edac_memctrl_kobj;
117 
118 /* We use these to wait for the reference counts on edac_memctrl_kobj and
119  * edac_pci_kobj to reach 0.
120  */
121 static struct completion edac_memctrl_kobj_complete;
122 
123 /*
124  * /sys/devices/system/edac/mc;
125  *	data structures and methods
126  */
127 static ssize_t memctrl_int_show(void *ptr, char *buffer)
128 {
129 	int *value = (int*) ptr;
130 	return sprintf(buffer, "%u\n", *value);
131 }
132 
133 static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
134 {
135 	int *value = (int*) ptr;
136 
137 	if (isdigit(*buffer))
138 		*value = simple_strtoul(buffer, NULL, 0);
139 
140 	return count;
141 }
142 
143 struct memctrl_dev_attribute {
144 	struct attribute attr;
145 	void *value;
146 	ssize_t (*show)(void *,char *);
147 	ssize_t (*store)(void *, const char *, size_t);
148 };
149 
150 /* Set of show/store abstract level functions for memory control object */
151 static ssize_t memctrl_dev_show(struct kobject *kobj,
152 		struct attribute *attr, char *buffer)
153 {
154 	struct memctrl_dev_attribute *memctrl_dev;
155 	memctrl_dev = (struct memctrl_dev_attribute*)attr;
156 
157 	if (memctrl_dev->show)
158 		return memctrl_dev->show(memctrl_dev->value, buffer);
159 
160 	return -EIO;
161 }
162 
163 static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
164 		const char *buffer, size_t count)
165 {
166 	struct memctrl_dev_attribute *memctrl_dev;
167 	memctrl_dev = (struct memctrl_dev_attribute*)attr;
168 
169 	if (memctrl_dev->store)
170 		return memctrl_dev->store(memctrl_dev->value, buffer, count);
171 
172 	return -EIO;
173 }
174 
175 static struct sysfs_ops memctrlfs_ops = {
176 	.show   = memctrl_dev_show,
177 	.store  = memctrl_dev_store
178 };
179 
180 #define MEMCTRL_ATTR(_name,_mode,_show,_store)			\
181 struct memctrl_dev_attribute attr_##_name = {			\
182 	.attr = {.name = __stringify(_name), .mode = _mode },	\
183 	.value  = &_name,					\
184 	.show   = _show,					\
185 	.store  = _store,					\
186 };
187 
188 #define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store)	\
189 struct memctrl_dev_attribute attr_##_name = {			\
190 	.attr = {.name = __stringify(_name), .mode = _mode },	\
191 	.value  = _data,					\
192 	.show   = _show,					\
193 	.store  = _store,					\
194 };
195 
196 /* memory controller control files */
197 MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
198 MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
199 MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
200 MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
201 
202 /* Base Attributes of the memory ECC object */
203 static struct memctrl_dev_attribute *memctrl_attr[] = {
204 	&attr_panic_on_ue,
205 	&attr_log_ue,
206 	&attr_log_ce,
207 	&attr_poll_msec,
208 	NULL,
209 };
210 
211 /* Main MC kobject release() function */
212 static void edac_memctrl_master_release(struct kobject *kobj)
213 {
214 	debugf1("%s()\n", __func__);
215 	complete(&edac_memctrl_kobj_complete);
216 }
217 
218 static struct kobj_type ktype_memctrl = {
219 	.release = edac_memctrl_master_release,
220 	.sysfs_ops = &memctrlfs_ops,
221 	.default_attrs = (struct attribute **) memctrl_attr,
222 };
223 
224 /* Initialize the main sysfs entries for edac:
225  *   /sys/devices/system/edac
226  *
227  * and children
228  *
229  * Return:  0 SUCCESS
230  *         !0 FAILURE
231  */
232 static int edac_sysfs_memctrl_setup(void)
233 {
234 	int err = 0;
235 
236 	debugf1("%s()\n", __func__);
237 
238 	/* create the /sys/devices/system/edac directory */
239 	err = sysdev_class_register(&edac_class);
240 
241 	if (err) {
242 		debugf1("%s() error=%d\n", __func__, err);
243 		return err;
244 	}
245 
246 	/* Init the MC's kobject */
247 	memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
248 	edac_memctrl_kobj.parent = &edac_class.kset.kobj;
249 	edac_memctrl_kobj.ktype = &ktype_memctrl;
250 
251 	/* generate sysfs "..../edac/mc"   */
252 	err = kobject_set_name(&edac_memctrl_kobj,"mc");
253 
254 	if (err)
255 		goto fail;
256 
257 	/* FIXME: maybe new sysdev_create_subdir() */
258 	err = kobject_register(&edac_memctrl_kobj);
259 
260 	if (err) {
261 		debugf1("Failed to register '.../edac/mc'\n");
262 		goto fail;
263 	}
264 
265 	debugf1("Registered '.../edac/mc' kobject\n");
266 
267 	return 0;
268 
269 fail:
270 	sysdev_class_unregister(&edac_class);
271 	return err;
272 }
273 
274 /*
275  * MC teardown:
276  *	the '..../edac/mc' kobject followed by '..../edac' itself
277  */
278 static void edac_sysfs_memctrl_teardown(void)
279 {
280 	debugf0("MC: " __FILE__ ": %s()\n", __func__);
281 
282 	/* Unregister the MC's kobject and wait for reference count to reach
283 	 * 0.
284 	 */
285 	init_completion(&edac_memctrl_kobj_complete);
286 	kobject_unregister(&edac_memctrl_kobj);
287 	wait_for_completion(&edac_memctrl_kobj_complete);
288 
289 	/* Unregister the 'edac' object */
290 	sysdev_class_unregister(&edac_class);
291 }
292 
293 #ifdef CONFIG_PCI
294 static ssize_t edac_pci_int_show(void *ptr, char *buffer)
295 {
296 	int *value = ptr;
297 	return sprintf(buffer,"%d\n",*value);
298 }
299 
300 static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
301 {
302 	int *value = ptr;
303 
304 	if (isdigit(*buffer))
305 		*value = simple_strtoul(buffer,NULL,0);
306 
307 	return count;
308 }
309 
310 struct edac_pci_dev_attribute {
311 	struct attribute attr;
312 	void *value;
313 	ssize_t (*show)(void *,char *);
314 	ssize_t (*store)(void *, const char *,size_t);
315 };
316 
317 /* Set of show/store abstract level functions for PCI Parity object */
318 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
319 		char *buffer)
320 {
321 	struct edac_pci_dev_attribute *edac_pci_dev;
322 	edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
323 
324 	if (edac_pci_dev->show)
325 		return edac_pci_dev->show(edac_pci_dev->value, buffer);
326 	return -EIO;
327 }
328 
329 static ssize_t edac_pci_dev_store(struct kobject *kobj,
330 		struct attribute *attr, const char *buffer, size_t count)
331 {
332 	struct edac_pci_dev_attribute *edac_pci_dev;
333 	edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
334 
335 	if (edac_pci_dev->store)
336 		return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
337 	return -EIO;
338 }
339 
340 static struct sysfs_ops edac_pci_sysfs_ops = {
341 	.show   = edac_pci_dev_show,
342 	.store  = edac_pci_dev_store
343 };
344 
345 #define EDAC_PCI_ATTR(_name,_mode,_show,_store)			\
346 struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
347 	.attr = {.name = __stringify(_name), .mode = _mode },	\
348 	.value  = &_name,					\
349 	.show   = _show,					\
350 	.store  = _store,					\
351 };
352 
353 #define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store)	\
354 struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
355 	.attr = {.name = __stringify(_name), .mode = _mode },	\
356 	.value  = _data,					\
357 	.show   = _show,					\
358 	.store  = _store,					\
359 };
360 
361 /* PCI Parity control files */
362 EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
363 	edac_pci_int_store);
364 EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
365 	edac_pci_int_store);
366 EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
367 
368 /* Base Attributes of the PCI Parity object */
369 static struct edac_pci_dev_attribute *edac_pci_attr[] = {
370 	&edac_pci_attr_check_pci_parity,
371 	&edac_pci_attr_panic_on_pci_parity,
372 	&edac_pci_attr_pci_parity_count,
373 	NULL,
374 };
375 
376 /* No memory to release */
377 static void edac_pci_release(struct kobject *kobj)
378 {
379 	debugf1("%s()\n", __func__);
380 	complete(&edac_pci_kobj_complete);
381 }
382 
383 static struct kobj_type ktype_edac_pci = {
384 	.release = edac_pci_release,
385 	.sysfs_ops = &edac_pci_sysfs_ops,
386 	.default_attrs = (struct attribute **) edac_pci_attr,
387 };
388 
389 /**
390  * edac_sysfs_pci_setup()
391  *	Create the 'pci' kobject under '/sys/devices/system/edac'
392  */
393 static int edac_sysfs_pci_setup(void)
394 {
395 	int err;
396 
397 	debugf1("%s()\n", __func__);
398 
399 	memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
400 	edac_pci_kobj.parent = &edac_class.kset.kobj;
401 	edac_pci_kobj.ktype = &ktype_edac_pci;
402 	err = kobject_set_name(&edac_pci_kobj, "pci");
403 
404 	if (!err) {
405 		/* Instantiate the pci object */
406 		/* FIXME: maybe new sysdev_create_subdir() */
407 		err = kobject_register(&edac_pci_kobj);
408 
409 		if (err)
410 			debugf1("Failed to register '.../edac/pci'\n");
411 		else
412 			debugf1("Registered '.../edac/pci' kobject\n");
413 	}
414 
415 	return err;
416 }
417 
418 static void edac_sysfs_pci_teardown(void)
419 {
420 	debugf0("%s()\n", __func__);
421 	init_completion(&edac_pci_kobj_complete);
422 	kobject_unregister(&edac_pci_kobj);
423 	wait_for_completion(&edac_pci_kobj_complete);
424 }
425 
426 
427 static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
428 {
429 	int where;
430 	u16 status;
431 
432 	where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
433 	pci_read_config_word(dev, where, &status);
434 
435 	/* If we get back 0xFFFF then we must suspect that the card has been
436 	 * pulled but the Linux PCI layer has not yet finished cleaning up.
437 	 * We don't want to report on such devices
438 	 */
439 
440 	if (status == 0xFFFF) {
441 		u32 sanity;
442 
443 		pci_read_config_dword(dev, 0, &sanity);
444 
445 		if (sanity == 0xFFFFFFFF)
446 			return 0;
447 	}
448 
449 	status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
450 		PCI_STATUS_PARITY;
451 
452 	if (status)
453 		/* reset only the bits we are interested in */
454 		pci_write_config_word(dev, where, status);
455 
456 	return status;
457 }
458 
459 typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
460 
461 /* Clear any PCI parity errors logged by this device. */
462 static void edac_pci_dev_parity_clear(struct pci_dev *dev)
463 {
464 	u8 header_type;
465 
466 	get_pci_parity_status(dev, 0);
467 
468 	/* read the device TYPE, looking for bridges */
469 	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
470 
471 	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
472 		get_pci_parity_status(dev, 1);
473 }
474 
475 /*
476  *  PCI Parity polling
477  *
478  */
479 static void edac_pci_dev_parity_test(struct pci_dev *dev)
480 {
481 	u16 status;
482 	u8  header_type;
483 
484 	/* read the STATUS register on this device
485 	 */
486 	status = get_pci_parity_status(dev, 0);
487 
488 	debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
489 
490 	/* check the status reg for errors */
491 	if (status) {
492 		if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
493 			edac_printk(KERN_CRIT, EDAC_PCI,
494 				"Signaled System Error on %s\n",
495 				pci_name(dev));
496 
497 		if (status & (PCI_STATUS_PARITY)) {
498 			edac_printk(KERN_CRIT, EDAC_PCI,
499 				"Master Data Parity Error on %s\n",
500 				pci_name(dev));
501 
502 			atomic_inc(&pci_parity_count);
503 		}
504 
505 		if (status & (PCI_STATUS_DETECTED_PARITY)) {
506 			edac_printk(KERN_CRIT, EDAC_PCI,
507 				"Detected Parity Error on %s\n",
508 				pci_name(dev));
509 
510 			atomic_inc(&pci_parity_count);
511 		}
512 	}
513 
514 	/* read the device TYPE, looking for bridges */
515 	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
516 
517 	debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
518 
519 	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
520 		/* On bridges, need to examine secondary status register  */
521 		status = get_pci_parity_status(dev, 1);
522 
523 		debugf2("PCI SEC_STATUS= 0x%04x %s\n",
524 				status, dev->dev.bus_id );
525 
526 		/* check the secondary status reg for errors */
527 		if (status) {
528 			if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
529 				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
530 					"Signaled System Error on %s\n",
531 					pci_name(dev));
532 
533 			if (status & (PCI_STATUS_PARITY)) {
534 				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
535 					"Master Data Parity Error on "
536 					"%s\n", pci_name(dev));
537 
538 				atomic_inc(&pci_parity_count);
539 			}
540 
541 			if (status & (PCI_STATUS_DETECTED_PARITY)) {
542 				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
543 					"Detected Parity Error on %s\n",
544 					pci_name(dev));
545 
546 				atomic_inc(&pci_parity_count);
547 			}
548 		}
549 	}
550 }
551 
552 /*
553  * pci_dev parity list iterator
554  *	Scan the PCI device list for one iteration, looking for SERRORs
555  *	Master Parity ERRORS or Parity ERRORs on primary or secondary devices
556  */
557 static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
558 {
559 	struct pci_dev *dev = NULL;
560 
561 	/* request for kernel access to the next PCI device, if any,
562 	 * and while we are looking at it have its reference count
563 	 * bumped until we are done with it
564 	 */
565 	while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
566 		fn(dev);
567 	}
568 }
569 
570 static void do_pci_parity_check(void)
571 {
572 	unsigned long flags;
573 	int before_count;
574 
575 	debugf3("%s()\n", __func__);
576 
577 	if (!check_pci_parity)
578 		return;
579 
580 	before_count = atomic_read(&pci_parity_count);
581 
582 	/* scan all PCI devices looking for a Parity Error on devices and
583 	 * bridges
584 	 */
585 	local_irq_save(flags);
586 	edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
587 	local_irq_restore(flags);
588 
589 	/* Only if operator has selected panic on PCI Error */
590 	if (panic_on_pci_parity) {
591 		/* If the count is different 'after' from 'before' */
592 		if (before_count != atomic_read(&pci_parity_count))
593 			panic("EDAC: PCI Parity Error");
594 	}
595 }
596 
597 static inline void clear_pci_parity_errors(void)
598 {
599 	/* Clear any PCI bus parity errors that devices initially have logged
600 	 * in their registers.
601 	 */
602 	edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
603 }
604 
605 #else	/* CONFIG_PCI */
606 
607 /* pre-process these away */
608 #define	do_pci_parity_check()
609 #define	clear_pci_parity_errors()
610 #define	edac_sysfs_pci_teardown()
611 #define	edac_sysfs_pci_setup()	(0)
612 
613 #endif	/* CONFIG_PCI */
614 
615 /* EDAC sysfs CSROW data structures and methods
616  */
617 
618 /* Set of more default csrow<id> attribute show/store functions */
619 static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, int private)
620 {
621 	return sprintf(data,"%u\n", csrow->ue_count);
622 }
623 
624 static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, int private)
625 {
626 	return sprintf(data,"%u\n", csrow->ce_count);
627 }
628 
629 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, int private)
630 {
631 	return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
632 }
633 
634 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, int private)
635 {
636 	return sprintf(data,"%s\n", mem_types[csrow->mtype]);
637 }
638 
639 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, int private)
640 {
641 	return sprintf(data,"%s\n", dev_types[csrow->dtype]);
642 }
643 
644 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, int private)
645 {
646 	return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
647 }
648 
649 /* show/store functions for DIMM Label attributes */
650 static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
651 		char *data, int channel)
652 {
653 	return snprintf(data, EDAC_MC_LABEL_LEN,"%s",
654 			csrow->channels[channel].label);
655 }
656 
657 static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
658 				const char *data,
659 				size_t count,
660 				int channel)
661 {
662 	ssize_t max_size = 0;
663 
664 	max_size = min((ssize_t)count,(ssize_t)EDAC_MC_LABEL_LEN-1);
665 	strncpy(csrow->channels[channel].label, data, max_size);
666 	csrow->channels[channel].label[max_size] = '\0';
667 
668 	return max_size;
669 }
670 
671 /* show function for dynamic chX_ce_count attribute */
672 static ssize_t channel_ce_count_show(struct csrow_info *csrow,
673 				char *data,
674 				int channel)
675 {
676 	return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
677 }
678 
679 /* csrow specific attribute structure */
680 struct csrowdev_attribute {
681 	struct attribute attr;
682 	ssize_t (*show)(struct csrow_info *,char *,int);
683 	ssize_t (*store)(struct csrow_info *, const char *,size_t,int);
684 	int    private;
685 };
686 
687 #define to_csrow(k) container_of(k, struct csrow_info, kobj)
688 #define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
689 
690 /* Set of show/store higher level functions for default csrow attributes */
691 static ssize_t csrowdev_show(struct kobject *kobj,
692 			struct attribute *attr,
693 			char *buffer)
694 {
695 	struct csrow_info *csrow = to_csrow(kobj);
696 	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
697 
698 	if (csrowdev_attr->show)
699 		return csrowdev_attr->show(csrow,
700 					buffer,
701 					csrowdev_attr->private);
702 	return -EIO;
703 }
704 
705 static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
706 		const char *buffer, size_t count)
707 {
708 	struct csrow_info *csrow = to_csrow(kobj);
709 	struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
710 
711 	if (csrowdev_attr->store)
712 		return csrowdev_attr->store(csrow,
713 					buffer,
714 					count,
715 					csrowdev_attr->private);
716 	return -EIO;
717 }
718 
719 static struct sysfs_ops csrowfs_ops = {
720 	.show   = csrowdev_show,
721 	.store  = csrowdev_store
722 };
723 
724 #define CSROWDEV_ATTR(_name,_mode,_show,_store,_private)	\
725 struct csrowdev_attribute attr_##_name = {			\
726 	.attr = {.name = __stringify(_name), .mode = _mode },	\
727 	.show   = _show,					\
728 	.store  = _store,					\
729 	.private = _private,					\
730 };
731 
732 /* default csrow<id> attribute files */
733 CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL,0);
734 CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL,0);
735 CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL,0);
736 CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL,0);
737 CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL,0);
738 CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL,0);
739 
740 /* default attributes of the CSROW<id> object */
741 static struct csrowdev_attribute *default_csrow_attr[] = {
742 	&attr_dev_type,
743 	&attr_mem_type,
744 	&attr_edac_mode,
745 	&attr_size_mb,
746 	&attr_ue_count,
747 	&attr_ce_count,
748 	NULL,
749 };
750 
751 
752 /* possible dynamic channel DIMM Label attribute files */
753 CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
754 		channel_dimm_label_show,
755 		channel_dimm_label_store,
756 		0 );
757 CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
758 		channel_dimm_label_show,
759 		channel_dimm_label_store,
760 		1 );
761 CSROWDEV_ATTR(ch2_dimm_label,S_IRUGO|S_IWUSR,
762 		channel_dimm_label_show,
763 		channel_dimm_label_store,
764 		2 );
765 CSROWDEV_ATTR(ch3_dimm_label,S_IRUGO|S_IWUSR,
766 		channel_dimm_label_show,
767 		channel_dimm_label_store,
768 		3 );
769 CSROWDEV_ATTR(ch4_dimm_label,S_IRUGO|S_IWUSR,
770 		channel_dimm_label_show,
771 		channel_dimm_label_store,
772 		4 );
773 CSROWDEV_ATTR(ch5_dimm_label,S_IRUGO|S_IWUSR,
774 		channel_dimm_label_show,
775 		channel_dimm_label_store,
776 		5 );
777 
778 /* Total possible dynamic DIMM Label attribute file table */
779 static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
780 		&attr_ch0_dimm_label,
781 		&attr_ch1_dimm_label,
782 		&attr_ch2_dimm_label,
783 		&attr_ch3_dimm_label,
784 		&attr_ch4_dimm_label,
785 		&attr_ch5_dimm_label
786 };
787 
788 /* possible dynamic channel ce_count attribute files */
789 CSROWDEV_ATTR(ch0_ce_count,S_IRUGO|S_IWUSR,
790 		channel_ce_count_show,
791 		NULL,
792 		0 );
793 CSROWDEV_ATTR(ch1_ce_count,S_IRUGO|S_IWUSR,
794 		channel_ce_count_show,
795 		NULL,
796 		1 );
797 CSROWDEV_ATTR(ch2_ce_count,S_IRUGO|S_IWUSR,
798 		channel_ce_count_show,
799 		NULL,
800 		2 );
801 CSROWDEV_ATTR(ch3_ce_count,S_IRUGO|S_IWUSR,
802 		channel_ce_count_show,
803 		NULL,
804 		3 );
805 CSROWDEV_ATTR(ch4_ce_count,S_IRUGO|S_IWUSR,
806 		channel_ce_count_show,
807 		NULL,
808 		4 );
809 CSROWDEV_ATTR(ch5_ce_count,S_IRUGO|S_IWUSR,
810 		channel_ce_count_show,
811 		NULL,
812 		5 );
813 
814 /* Total possible dynamic ce_count attribute file table */
815 static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
816 		&attr_ch0_ce_count,
817 		&attr_ch1_ce_count,
818 		&attr_ch2_ce_count,
819 		&attr_ch3_ce_count,
820 		&attr_ch4_ce_count,
821 		&attr_ch5_ce_count
822 };
823 
824 
825 #define EDAC_NR_CHANNELS	6
826 
827 /* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */
828 static int edac_create_channel_files(struct kobject *kobj, int chan)
829 {
830 	int err=-ENODEV;
831 
832 	if (chan >= EDAC_NR_CHANNELS)
833 		return err;
834 
835 	/* create the DIMM label attribute file */
836 	err = sysfs_create_file(kobj,
837 			(struct attribute *) dynamic_csrow_dimm_attr[chan]);
838 
839 	if (!err) {
840 		/* create the CE Count attribute file */
841 		err = sysfs_create_file(kobj,
842 			(struct attribute *) dynamic_csrow_ce_count_attr[chan]);
843 	} else {
844 		debugf1("%s() failed to create dimm label file\n", __func__);
845 	}
846 
847 	return err;
848 }
849 
850 /* No memory to release for this kobj */
851 static void edac_csrow_instance_release(struct kobject *kobj)
852 {
853 	struct csrow_info *cs;
854 
855 	cs = container_of(kobj, struct csrow_info, kobj);
856 	complete(&cs->kobj_complete);
857 }
858 
859 /* the kobj_type instance for a CSROW */
860 static struct kobj_type ktype_csrow = {
861 	.release = edac_csrow_instance_release,
862 	.sysfs_ops = &csrowfs_ops,
863 	.default_attrs = (struct attribute **) default_csrow_attr,
864 };
865 
866 /* Create a CSROW object under specified edac_mc_device */
867 static int edac_create_csrow_object(
868 		struct kobject *edac_mci_kobj,
869 		struct csrow_info *csrow,
870 		int index)
871 {
872 	int err = 0;
873 	int chan;
874 
875 	memset(&csrow->kobj, 0, sizeof(csrow->kobj));
876 
877 	/* generate ..../edac/mc/mc<id>/csrow<index>   */
878 
879 	csrow->kobj.parent = edac_mci_kobj;
880 	csrow->kobj.ktype = &ktype_csrow;
881 
882 	/* name this instance of csrow<id> */
883 	err = kobject_set_name(&csrow->kobj,"csrow%d",index);
884 	if (err)
885 		goto error_exit;
886 
887 	/* Instantiate the csrow object */
888 	err = kobject_register(&csrow->kobj);
889 	if (!err) {
890 		/* Create the dynamic attribute files on this csrow,
891 		 * namely, the DIMM labels and the channel ce_count
892 		 */
893 		for (chan = 0; chan < csrow->nr_channels; chan++) {
894 			err = edac_create_channel_files(&csrow->kobj,chan);
895 			if (err)
896 				break;
897 		}
898 	}
899 
900 error_exit:
901 	return err;
902 }
903 
904 /* default sysfs methods and data structures for the main MCI kobject */
905 
906 static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
907 		const char *data, size_t count)
908 {
909 	int row, chan;
910 
911 	mci->ue_noinfo_count = 0;
912 	mci->ce_noinfo_count = 0;
913 	mci->ue_count = 0;
914 	mci->ce_count = 0;
915 
916 	for (row = 0; row < mci->nr_csrows; row++) {
917 		struct csrow_info *ri = &mci->csrows[row];
918 
919 		ri->ue_count = 0;
920 		ri->ce_count = 0;
921 
922 		for (chan = 0; chan < ri->nr_channels; chan++)
923 			ri->channels[chan].ce_count = 0;
924 	}
925 
926 	mci->start_time = jiffies;
927 	return count;
928 }
929 
930 /* memory scrubbing */
931 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
932 					const char *data, size_t count)
933 {
934 	u32 bandwidth = -1;
935 
936 	if (mci->set_sdram_scrub_rate) {
937 
938 		memctrl_int_store(&bandwidth, data, count);
939 
940 		if (!(*mci->set_sdram_scrub_rate)(mci, &bandwidth)) {
941 			edac_printk(KERN_DEBUG, EDAC_MC,
942 				"Scrub rate set successfully, applied: %d\n",
943 				bandwidth);
944 		} else {
945 			/* FIXME: error codes maybe? */
946 			edac_printk(KERN_DEBUG, EDAC_MC,
947 				"Scrub rate set FAILED, could not apply: %d\n",
948 				bandwidth);
949 		}
950 	} else {
951 		/* FIXME: produce "not implemented" ERROR for user-side. */
952 		edac_printk(KERN_WARNING, EDAC_MC,
953 			"Memory scrubbing 'set' control is not implemented!\n");
954 	}
955 	return count;
956 }
957 
958 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
959 {
960 	u32 bandwidth = -1;
961 
962 	if (mci->get_sdram_scrub_rate) {
963 		if (!(*mci->get_sdram_scrub_rate)(mci, &bandwidth)) {
964 			edac_printk(KERN_DEBUG, EDAC_MC,
965 				"Scrub rate successfully fetched: %d\n",
966 				bandwidth);
967 		} else {
968 			/* FIXME: error codes maybe? */
969 			edac_printk(KERN_DEBUG, EDAC_MC,
970 				"Scrub rate fetch FAILED, got: %d\n",
971 				bandwidth);
972 		}
973 	} else {
974 		/* FIXME: produce "not implemented" ERROR for user-side.  */
975 		edac_printk(KERN_WARNING, EDAC_MC,
976 			"Memory scrubbing 'get' control is not implemented!\n");
977 	}
978 	return sprintf(data, "%d\n", bandwidth);
979 }
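/*
 * A minimal sketch, assuming a hypothetical low-level driver, of how the
 * scrub-rate callbacks used above could be wired up.  The my_* names are
 * illustrative only; the callback prototype is inferred from the call
 * sites above (return 0 on success, write back the rate applied):
 *
 *	static int my_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 *	{
 *		u32 applied = my_hw_apply_scrub_rate(*bw);
 *
 *		*bw = applied;
 *		return 0;
 *	}
 *
 *	mci->set_sdram_scrub_rate = my_set_sdram_scrub_rate;
 *	mci->get_sdram_scrub_rate = my_get_sdram_scrub_rate;
 */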
980 
981 /* default attribute files for the MCI object */
982 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
983 {
984 	return sprintf(data,"%d\n", mci->ue_count);
985 }
986 
987 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
988 {
989 	return sprintf(data,"%d\n", mci->ce_count);
990 }
991 
992 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
993 {
994 	return sprintf(data,"%d\n", mci->ce_noinfo_count);
995 }
996 
997 static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
998 {
999 	return sprintf(data,"%d\n", mci->ue_noinfo_count);
1000 }
1001 
1002 static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
1003 {
1004 	return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
1005 }
1006 
1007 static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
1008 {
1009 	return sprintf(data,"%s\n", mci->ctl_name);
1010 }
1011 
1012 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
1013 {
1014 	int total_pages, csrow_idx;
1015 
1016 	for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
1017 			csrow_idx++) {
1018 		struct csrow_info *csrow = &mci->csrows[csrow_idx];
1019 
1020 		if (!csrow->nr_pages)
1021 			continue;
1022 
1023 		total_pages += csrow->nr_pages;
1024 	}
1025 
1026 	return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
1027 }
1028 
1029 struct mcidev_attribute {
1030 	struct attribute attr;
1031 	ssize_t (*show)(struct mem_ctl_info *,char *);
1032 	ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
1033 };
1034 
1035 #define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
1036 #define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
1037 
1038 /* MCI show/store functions for top most object */
1039 static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
1040 		char *buffer)
1041 {
1042 	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
1043 	struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
1044 
1045 	if (mcidev_attr->show)
1046 		return mcidev_attr->show(mem_ctl_info, buffer);
1047 
1048 	return -EIO;
1049 }
1050 
1051 static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
1052 		const char *buffer, size_t count)
1053 {
1054 	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
1055 	struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
1056 
1057 	if (mcidev_attr->store)
1058 		return mcidev_attr->store(mem_ctl_info, buffer, count);
1059 
1060 	return -EIO;
1061 }
1062 
1063 static struct sysfs_ops mci_ops = {
1064 	.show = mcidev_show,
1065 	.store = mcidev_store
1066 };
1067 
1068 #define MCIDEV_ATTR(_name,_mode,_show,_store)			\
1069 struct mcidev_attribute mci_attr_##_name = {			\
1070 	.attr = {.name = __stringify(_name), .mode = _mode },	\
1071 	.show   = _show,					\
1072 	.store  = _store,					\
1073 };
1074 
1075 /* default Control file */
1076 MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
1077 
1078 /* default Attribute files */
1079 MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
1080 MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
1081 MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
1082 MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
1083 MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
1084 MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
1085 MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
1086 
1087 /* memory scrubber attribute file */
1088 MCIDEV_ATTR(sdram_scrub_rate,S_IRUGO|S_IWUSR,mci_sdram_scrub_rate_show,mci_sdram_scrub_rate_store);
1089 
1090 static struct mcidev_attribute *mci_attr[] = {
1091 	&mci_attr_reset_counters,
1092 	&mci_attr_mc_name,
1093 	&mci_attr_size_mb,
1094 	&mci_attr_seconds_since_reset,
1095 	&mci_attr_ue_noinfo_count,
1096 	&mci_attr_ce_noinfo_count,
1097 	&mci_attr_ue_count,
1098 	&mci_attr_ce_count,
1099 	&mci_attr_sdram_scrub_rate,
1100 	NULL
1101 };
1102 
1103 /*
1104  * Release of a MC controlling instance
1105  */
1106 static void edac_mci_instance_release(struct kobject *kobj)
1107 {
1108 	struct mem_ctl_info *mci;
1109 
1110 	mci = to_mci(kobj);
1111 	debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1112 	complete(&mci->kobj_complete);
1113 }
1114 
1115 static struct kobj_type ktype_mci = {
1116 	.release = edac_mci_instance_release,
1117 	.sysfs_ops = &mci_ops,
1118 	.default_attrs = (struct attribute **) mci_attr,
1119 };
1120 
1121 
1122 #define EDAC_DEVICE_SYMLINK	"device"
1123 
1124 /*
1125  * Create a new Memory Controller kobject instance,
1126  *	mc<id> under the 'mc' directory
1127  *
1128  * Return:
1129  *	0	Success
1130  *	!0	Failure
1131  */
1132 static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1133 {
1134 	int i;
1135 	int err;
1136 	struct csrow_info *csrow;
1137 	struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1138 
1139 	debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1140 	memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1141 
1142 	/* set the name of the mc<id> object */
1143 	err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1144 	if (err)
1145 		return err;
1146 
1147 	/* link to our parent the '..../edac/mc' object */
1148 	edac_mci_kobj->parent = &edac_memctrl_kobj;
1149 	edac_mci_kobj->ktype = &ktype_mci;
1150 
1151 	/* register the mc<id> kobject */
1152 	err = kobject_register(edac_mci_kobj);
1153 	if (err)
1154 		return err;
1155 
1156 	/* create a symlink for the device */
1157 	err = sysfs_create_link(edac_mci_kobj, &mci->dev->kobj,
1158 				EDAC_DEVICE_SYMLINK);
1159 	if (err)
1160 		goto fail0;
1161 
1162 	/* Make directories for each CSROW object
1163 	 * under the mc<id> kobject
1164 	 */
1165 	for (i = 0; i < mci->nr_csrows; i++) {
1166 		csrow = &mci->csrows[i];
1167 
1168 		/* Only expose populated CSROWs */
1169 		if (csrow->nr_pages > 0) {
1170 			err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1171 			if (err)
1172 				goto fail1;
1173 		}
1174 	}
1175 
1176 	return 0;
1177 
1178 	/* CSROW error: back out what has already been registered */
1179 fail1:
1180 	for (i--; i >= 0; i--) {
1181 		if (mci->csrows[i].nr_pages > 0) {
1182 			init_completion(&mci->csrows[i].kobj_complete);
1183 			kobject_unregister(&mci->csrows[i].kobj);
1184 			wait_for_completion(&mci->csrows[i].kobj_complete);
1185 		}
1186 	}
1187 
1188 fail0:
1189 	init_completion(&mci->kobj_complete);
1190 	kobject_unregister(edac_mci_kobj);
1191 	wait_for_completion(&mci->kobj_complete);
1192 	return err;
1193 }
1194 
1195 /*
1196  * remove a Memory Controller instance
1197  */
1198 static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1199 {
1200 	int i;
1201 
1202 	debugf0("%s()\n", __func__);
1203 
1204 	/* remove all csrow kobjects */
1205 	for (i = 0; i < mci->nr_csrows; i++) {
1206 		if (mci->csrows[i].nr_pages > 0) {
1207 			init_completion(&mci->csrows[i].kobj_complete);
1208 			kobject_unregister(&mci->csrows[i].kobj);
1209 			wait_for_completion(&mci->csrows[i].kobj_complete);
1210 		}
1211 	}
1212 
1213 	sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1214 	init_completion(&mci->kobj_complete);
1215 	kobject_unregister(&mci->edac_mci_kobj);
1216 	wait_for_completion(&mci->kobj_complete);
1217 }
1218 
1219 /* END OF sysfs data and methods */
1220 
1221 #ifdef CONFIG_EDAC_DEBUG
1222 
1223 void edac_mc_dump_channel(struct channel_info *chan)
1224 {
1225 	debugf4("\tchannel = %p\n", chan);
1226 	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
1227 	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
1228 	debugf4("\tchannel->label = '%s'\n", chan->label);
1229 	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
1230 }
1231 EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
1232 
1233 void edac_mc_dump_csrow(struct csrow_info *csrow)
1234 {
1235 	debugf4("\tcsrow = %p\n", csrow);
1236 	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
1237 	debugf4("\tcsrow->first_page = 0x%lx\n",
1238 		csrow->first_page);
1239 	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
1240 	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
1241 	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
1242 	debugf4("\tcsrow->nr_channels = %d\n",
1243 		csrow->nr_channels);
1244 	debugf4("\tcsrow->channels = %p\n", csrow->channels);
1245 	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
1246 }
1247 EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
1248 
1249 void edac_mc_dump_mci(struct mem_ctl_info *mci)
1250 {
1251 	debugf3("\tmci = %p\n", mci);
1252 	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
1253 	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
1254 	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
1255 	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
1256 	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
1257 		mci->nr_csrows, mci->csrows);
1258 	debugf3("\tdev = %p\n", mci->dev);
1259 	debugf3("\tmod_name:ctl_name = %s:%s\n",
1260 		mci->mod_name, mci->ctl_name);
1261 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
1262 }
1263 EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
1264 
1265 #endif  /* CONFIG_EDAC_DEBUG */
1266 
1267 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
1268  * Adjust 'ptr' so that its alignment is at least as stringent as what the
1269  * compiler would provide for X and return the aligned result.
1270  *
1271  * If 'size' is a constant, the compiler will optimize this whole function
1272  * down to either a no-op or the addition of a constant to the value of 'ptr'.
1273  */
1274 static inline char * align_ptr(void *ptr, unsigned size)
1275 {
1276 	unsigned align, r;
1277 
1278 	/* Here we assume that the alignment of a "long long" is the most
1279 	 * stringent alignment that the compiler will ever provide by default.
1280 	 * As far as I know, this is a reasonable assumption.
1281 	 */
1282 	if (size > sizeof(long))
1283 		align = sizeof(long long);
1284 	else if (size > sizeof(int))
1285 		align = sizeof(long);
1286 	else if (size > sizeof(short))
1287 		align = sizeof(int);
1288 	else if (size > sizeof(char))
1289 		align = sizeof(short);
1290 	else
1291 		return (char *) ptr;
1292 
1293 	r = size % align;
1294 
1295 	if (r == 0)
1296 		return (char *) ptr;
1297 
1298 	return (char *) (((unsigned long) ptr) + align - r);
1299 }
1300 
1301 /**
1302  * edac_mc_alloc: Allocate a struct mem_ctl_info structure
1303  * @sz_pvt:	size of private storage needed
1304  * @nr_csrows:	Number of CSROWS needed for this MC
1305  * @nr_chans:	Number of channels for the MC
1306  *
1307  * Everything is kmalloc'ed as one big chunk - more efficient.
1308  * Only can be used if all structures have the same lifetime - otherwise
1309  * you have to allocate and initialize your own structures.
1310  *
1311  * Use edac_mc_free() to free mc structures allocated by this function.
1312  *
1313  * Returns:
1314  *	NULL allocation failed
1315  *	struct mem_ctl_info pointer
1316  */
1317 struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1318 		unsigned nr_chans)
1319 {
1320 	struct mem_ctl_info *mci;
1321 	struct csrow_info *csi, *csrow;
1322 	struct channel_info *chi, *chp, *chan;
1323 	void *pvt;
1324 	unsigned size;
1325 	int row, chn;
1326 
1327 	/* Figure out the offsets of the various items from the start of an mc
1328 	 * structure.  We want the alignment of each item to be at least as
1329 	 * stringent as what the compiler would provide if we could simply
1330 	 * hardcode everything into a single struct.
1331 	 */
1332 	mci = (struct mem_ctl_info *) 0;
1333 	csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
1334 	chi = (struct channel_info *)
1335 			align_ptr(&csi[nr_csrows], sizeof(*chi));
1336 	pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
1337 	size = ((unsigned long) pvt) + sz_pvt;
1338 
1339 	if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
1340 		return NULL;
1341 
1342 	/* Adjust pointers so they point within the memory we just allocated
1343 	 * rather than an imaginary chunk of memory located at address 0.
1344 	 */
1345 	csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
1346 	chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
1347 	pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
1348 
1349 	memset(mci, 0, size);  /* clear all fields */
1350 	mci->csrows = csi;
1351 	mci->pvt_info = pvt;
1352 	mci->nr_csrows = nr_csrows;
1353 
1354 	for (row = 0; row < nr_csrows; row++) {
1355 		csrow = &csi[row];
1356 		csrow->csrow_idx = row;
1357 		csrow->mci = mci;
1358 		csrow->nr_channels = nr_chans;
1359 		chp = &chi[row * nr_chans];
1360 		csrow->channels = chp;
1361 
1362 		for (chn = 0; chn < nr_chans; chn++) {
1363 			chan = &chp[chn];
1364 			chan->chan_idx = chn;
1365 			chan->csrow = csrow;
1366 		}
1367 	}
1368 
1369 	return mci;
1370 }
1371 EXPORT_SYMBOL_GPL(edac_mc_alloc);
1372 
1373 /**
1374  * edac_mc_free:  Free a previously allocated 'mci' structure
1375  * @mci: pointer to a struct mem_ctl_info structure
1376  */
1377 void edac_mc_free(struct mem_ctl_info *mci)
1378 {
1379 	kfree(mci);
1380 }
1381 EXPORT_SYMBOL_GPL(edac_mc_free);
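/*
 * A minimal allocation sketch, assuming a hypothetical driver-private
 * struct my_pvt and csrow/channel counts discovered from the hardware
 * (the my_* names do not come from this file):
 *
 *	struct mem_ctl_info *mci;
 *
 *	mci = edac_mc_alloc(sizeof(struct my_pvt), nr_csrows, nr_chans);
 *	if (mci == NULL)
 *		return -ENOMEM;
 *
 * The csrows, their channels and mci->pvt_info all live inside this one
 * allocation, so a single edac_mc_free(mci) releases everything.
 */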
1382 
1383 static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
1384 {
1385 	struct mem_ctl_info *mci;
1386 	struct list_head *item;
1387 
1388 	debugf3("%s()\n", __func__);
1389 
1390 	list_for_each(item, &mc_devices) {
1391 		mci = list_entry(item, struct mem_ctl_info, link);
1392 
1393 		if (mci->dev == dev)
1394 			return mci;
1395 	}
1396 
1397 	return NULL;
1398 }
1399 
1400 /* Return 0 on success, 1 on failure.
1401  * Before calling this function, caller must
1402  * assign a unique value to mci->mc_idx.
1403  */
1404 static int add_mc_to_global_list (struct mem_ctl_info *mci)
1405 {
1406 	struct list_head *item, *insert_before;
1407 	struct mem_ctl_info *p;
1408 
1409 	insert_before = &mc_devices;
1410 
1411 	if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL))
1412 		goto fail0;
1413 
1414 	list_for_each(item, &mc_devices) {
1415 		p = list_entry(item, struct mem_ctl_info, link);
1416 
1417 		if (p->mc_idx >= mci->mc_idx) {
1418 			if (unlikely(p->mc_idx == mci->mc_idx))
1419 				goto fail1;
1420 
1421 			insert_before = item;
1422 			break;
1423 		}
1424 	}
1425 
1426 	list_add_tail_rcu(&mci->link, insert_before);
1427 	return 0;
1428 
1429 fail0:
1430 	edac_printk(KERN_WARNING, EDAC_MC,
1431 		    "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
1432 		    dev_name(p->dev), p->mod_name, p->ctl_name, p->mc_idx);
1433 	return 1;
1434 
1435 fail1:
1436 	edac_printk(KERN_WARNING, EDAC_MC,
1437 		    "bug in low-level driver: attempt to assign\n"
1438 		    "    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
1439 	return 1;
1440 }
1441 
1442 static void complete_mc_list_del(struct rcu_head *head)
1443 {
1444 	struct mem_ctl_info *mci;
1445 
1446 	mci = container_of(head, struct mem_ctl_info, rcu);
1447 	INIT_LIST_HEAD(&mci->link);
1448 	complete(&mci->complete);
1449 }
1450 
1451 static void del_mc_from_global_list(struct mem_ctl_info *mci)
1452 {
1453 	list_del_rcu(&mci->link);
1454 	init_completion(&mci->complete);
1455 	call_rcu(&mci->rcu, complete_mc_list_del);
1456 	wait_for_completion(&mci->complete);
1457 }
1458 
1459 /**
1460  * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
1461  *                 create sysfs entries associated with mci structure
1462  * @mci: pointer to the mci structure to be added to the list
1463  * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
1464  *
1465  * Return:
1466  *	0	Success
1467  *	!0	Failure
1468  */
1469 
1470 /* FIXME - should a warning be printed if no error detection? correction? */
1471 int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
1472 {
1473 	debugf0("%s()\n", __func__);
1474 	mci->mc_idx = mc_idx;
1475 #ifdef CONFIG_EDAC_DEBUG
1476 	if (edac_debug_level >= 3)
1477 		edac_mc_dump_mci(mci);
1478 
1479 	if (edac_debug_level >= 4) {
1480 		int i;
1481 
1482 		for (i = 0; i < mci->nr_csrows; i++) {
1483 			int j;
1484 
1485 			edac_mc_dump_csrow(&mci->csrows[i]);
1486 			for (j = 0; j < mci->csrows[i].nr_channels; j++)
1487 				edac_mc_dump_channel(
1488 					&mci->csrows[i].channels[j]);
1489 		}
1490 	}
1491 #endif
1492 	down(&mem_ctls_mutex);
1493 
1494 	if (add_mc_to_global_list(mci))
1495 		goto fail0;
1496 
1497 	/* set load time so that error rate can be tracked */
1498 	mci->start_time = jiffies;
1499 
1500 	if (edac_create_sysfs_mci_device(mci)) {
1501 		edac_mc_printk(mci, KERN_WARNING,
1502 			"failed to create sysfs device\n");
1503 		goto fail1;
1504 	}
1505 
1506 	/* Report action taken */
1507 	edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
1508 		mci->mod_name, mci->ctl_name, dev_name(mci->dev));
1509 
1510 	up(&mem_ctls_mutex);
1511 	return 0;
1512 
1513 fail1:
1514 	del_mc_from_global_list(mci);
1515 
1516 fail0:
1517 	up(&mem_ctls_mutex);
1518 	return 1;
1519 }
1520 EXPORT_SYMBOL_GPL(edac_mc_add_mc);
1521 
1522 /**
1523  * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
1524  *                 remove mci structure from global list
1525  * @dev: Pointer to 'struct device' representing mci structure to remove.
1526  *
1527  * Return pointer to removed mci structure, or NULL if device not found.
1528  */
1529 struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
1530 {
1531 	struct mem_ctl_info *mci;
1532 
1533 	debugf0("MC: %s()\n", __func__);
1534 	down(&mem_ctls_mutex);
1535 
1536 	if ((mci = find_mci_by_dev(dev)) == NULL) {
1537 		up(&mem_ctls_mutex);
1538 		return NULL;
1539 	}
1540 
1541 	edac_remove_sysfs_mci_device(mci);
1542 	del_mc_from_global_list(mci);
1543 	up(&mem_ctls_mutex);
1544 	edac_printk(KERN_INFO, EDAC_MC,
1545 		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
1546 		mci->mod_name, mci->ctl_name, dev_name(mci->dev));
1547 	return mci;
1548 }
1549 EXPORT_SYMBOL_GPL(edac_mc_del_mc);
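/*
 * A rough probe/remove flow for a low-level driver using the two calls
 * above; my_probe()/my_remove(), 'pdev' and 'idx' are illustrative and
 * not defined in this file:
 *
 *	probe:
 *		mci->dev = &pdev->dev;
 *		if (edac_mc_add_mc(mci, idx)) {
 *			edac_mc_free(mci);
 *			return -ENODEV;
 *		}
 *
 *	remove:
 *		mci = edac_mc_del_mc(&pdev->dev);
 *		if (mci)
 *			edac_mc_free(mci);
 */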
1550 
1551 void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
1552 {
1553 	struct page *pg;
1554 	void *virt_addr;
1555 	unsigned long flags = 0;
1556 
1557 	debugf3("%s()\n", __func__);
1558 
1559 	/* ECC error page was not in our memory. Ignore it. */
1560 	if(!pfn_valid(page))
1561 		return;
1562 
1563 	/* Find the actual page structure then map it and fix */
1564 	pg = pfn_to_page(page);
1565 
1566 	if (PageHighMem(pg))
1567 		local_irq_save(flags);
1568 
1569 	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
1570 
1571 	/* Perform architecture specific atomic scrub operation */
1572 	atomic_scrub(virt_addr + offset, size);
1573 
1574 	/* Unmap and complete */
1575 	kunmap_atomic(virt_addr, KM_BOUNCE_READ);
1576 
1577 	if (PageHighMem(pg))
1578 		local_irq_restore(flags);
1579 }
1580 EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
1581 
1582 /* FIXME - should return -1 */
1583 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
1584 {
1585 	struct csrow_info *csrows = mci->csrows;
1586 	int row, i;
1587 
1588 	debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
1589 	row = -1;
1590 
1591 	for (i = 0; i < mci->nr_csrows; i++) {
1592 		struct csrow_info *csrow = &csrows[i];
1593 
1594 		if (csrow->nr_pages == 0)
1595 			continue;
1596 
1597 		debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
1598 			"mask(0x%lx)\n", mci->mc_idx, __func__,
1599 			csrow->first_page, page, csrow->last_page,
1600 			csrow->page_mask);
1601 
1602 		if ((page >= csrow->first_page) &&
1603 		    (page <= csrow->last_page) &&
1604 		    ((page & csrow->page_mask) ==
1605 		     (csrow->first_page & csrow->page_mask))) {
1606 			row = i;
1607 			break;
1608 		}
1609 	}
1610 
1611 	if (row == -1)
1612 		edac_mc_printk(mci, KERN_ERR,
1613 			"could not look up page error address %lx\n",
1614 			(unsigned long) page);
1615 
1616 	return row;
1617 }
1618 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
1619 
1620 /* FIXME - setable log (warning/emerg) levels */
1621 /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
1622 void edac_mc_handle_ce(struct mem_ctl_info *mci,
1623 		unsigned long page_frame_number, unsigned long offset_in_page,
1624 		unsigned long syndrome, int row, int channel, const char *msg)
1625 {
1626 	unsigned long remapped_page;
1627 
1628 	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1629 
1630 	/* FIXME - maybe make panic on INTERNAL ERROR an option */
1631 	if (row >= mci->nr_csrows || row < 0) {
1632 		/* something is wrong */
1633 		edac_mc_printk(mci, KERN_ERR,
1634 			"INTERNAL ERROR: row out of range "
1635 			"(%d >= %d)\n", row, mci->nr_csrows);
1636 		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1637 		return;
1638 	}
1639 
1640 	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
1641 		/* something is wrong */
1642 		edac_mc_printk(mci, KERN_ERR,
1643 			"INTERNAL ERROR: channel out of range "
1644 			"(%d >= %d)\n", channel,
1645 			mci->csrows[row].nr_channels);
1646 		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1647 		return;
1648 	}
1649 
1650 	if (log_ce)
1651 		/* FIXME - put in DIMM location */
1652 		edac_mc_printk(mci, KERN_WARNING,
1653 			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
1654 			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
1655 			page_frame_number, offset_in_page,
1656 			mci->csrows[row].grain, syndrome, row, channel,
1657 			mci->csrows[row].channels[channel].label, msg);
1658 
1659 	mci->ce_count++;
1660 	mci->csrows[row].ce_count++;
1661 	mci->csrows[row].channels[channel].ce_count++;
1662 
1663 	if (mci->scrub_mode & SCRUB_SW_SRC) {
1664 		/*
1665 		 * Some MC's can remap memory so that it is still available
1666 		 * at a different address when PCI devices map into memory.
1667 		 * MC's that can't do this lose the memory where PCI devices
1668 		 * are mapped.  This mapping is MC dependent and so we call
1669 		 * back into the MC driver for it to map the MC page to
1670 		 * a physical (CPU) page which can then be mapped to a virtual
1671 		 * page - which can then be scrubbed.
1672 		 */
1673 		remapped_page = mci->ctl_page_to_phys ?
1674 		    mci->ctl_page_to_phys(mci, page_frame_number) :
1675 		    page_frame_number;
1676 
1677 		edac_mc_scrub_block(remapped_page, offset_in_page,
1678 					mci->csrows[row].grain);
1679 	}
1680 }
1681 EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
1682 
1683 void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
1684 {
1685 	if (log_ce)
1686 		edac_mc_printk(mci, KERN_WARNING,
1687 			"CE - no information available: %s\n", msg);
1688 
1689 	mci->ce_noinfo_count++;
1690 	mci->ce_count++;
1691 }
1692 EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
1693 
1694 void edac_mc_handle_ue(struct mem_ctl_info *mci,
1695 		unsigned long page_frame_number, unsigned long offset_in_page,
1696 		int row, const char *msg)
1697 {
1698 	int len = EDAC_MC_LABEL_LEN * 4;
1699 	char labels[len + 1];
1700 	char *pos = labels;
1701 	int chan;
1702 	int chars;
1703 
1704 	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1705 
1706 	/* FIXME - maybe make panic on INTERNAL ERROR an option */
1707 	if (row >= mci->nr_csrows || row < 0) {
1708 		/* something is wrong */
1709 		edac_mc_printk(mci, KERN_ERR,
1710 			"INTERNAL ERROR: row out of range "
1711 			"(%d >= %d)\n", row, mci->nr_csrows);
1712 		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1713 		return;
1714 	}
1715 
1716 	chars = snprintf(pos, len + 1, "%s",
1717 			mci->csrows[row].channels[0].label);
1718 	len -= chars;
1719 	pos += chars;
1720 
1721 	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
1722 	     chan++) {
1723 		chars = snprintf(pos, len + 1, ":%s",
1724 				mci->csrows[row].channels[chan].label);
1725 		len -= chars;
1726 		pos += chars;
1727 	}
1728 
1729 	if (log_ue)
1730 		edac_mc_printk(mci, KERN_EMERG,
1731 			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
1732 			"labels \"%s\": %s\n", page_frame_number,
1733 			offset_in_page, mci->csrows[row].grain, row, labels,
1734 			msg);
1735 
1736 	if (panic_on_ue)
1737 		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
1738 			"row %d, labels \"%s\": %s\n", mci->mc_idx,
1739 			page_frame_number, offset_in_page,
1740 			mci->csrows[row].grain, row, labels, msg);
1741 
1742 	mci->ue_count++;
1743 	mci->csrows[row].ue_count++;
1744 }
1745 EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
1746 
1747 void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
1748 {
1749 	if (panic_on_ue)
1750 		panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1751 
1752 	if (log_ue)
1753 		edac_mc_printk(mci, KERN_WARNING,
1754 			"UE - no information available: %s\n", msg);
1755 	mci->ue_noinfo_count++;
1756 	mci->ue_count++;
1757 }
1758 EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
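/*
 * A sketch of how a driver's mci->edac_check() callback (polled from
 * check_mc_devices() below) typically feeds the handlers above.  The
 * register decode, my_read_error_info() and its fields, is hypothetical:
 *
 *	static void my_check(struct mem_ctl_info *mci)
 *	{
 *		struct my_error_info info;
 *		int row;
 *
 *		if (!my_read_error_info(mci, &info))
 *			return;
 *
 *		row = edac_mc_find_csrow_by_page(mci, info.page);
 *		if (row < 0)
 *			return;
 *
 *		if (info.uncorrectable)
 *			edac_mc_handle_ue(mci, info.page, info.offset,
 *						row, "my_mc UE");
 *		else
 *			edac_mc_handle_ce(mci, info.page, info.offset,
 *						info.syndrome, row,
 *						info.channel, "my_mc CE");
 *	}
 */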
1759 
1760 
1761 /*************************************************************
1762  * On Fully Buffered DIMM modules, this helper function is
1763  * called to process UE events
1764  */
1765 void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
1766 				unsigned int csrow,
1767 				unsigned int channela,
1768 				unsigned int channelb,
1769 				char *msg)
1770 {
1771 	int len = EDAC_MC_LABEL_LEN * 4;
1772 	char labels[len + 1];
1773 	char *pos = labels;
1774 	int chars;
1775 
1776 	if (csrow >= mci->nr_csrows) {
1777 		/* something is wrong */
1778 		edac_mc_printk(mci, KERN_ERR,
1779 			"INTERNAL ERROR: row out of range (%d >= %d)\n",
1780 			csrow, mci->nr_csrows);
1781 		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1782 		return;
1783 	}
1784 
1785 	if (channela >= mci->csrows[csrow].nr_channels) {
1786 		/* something is wrong */
1787 		edac_mc_printk(mci, KERN_ERR,
1788 			"INTERNAL ERROR: channel-a out of range "
1789 			"(%d >= %d)\n",
1790 			channela, mci->csrows[csrow].nr_channels);
1791 		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1792 		return;
1793 	}
1794 
1795 	if (channelb >= mci->csrows[csrow].nr_channels) {
1796 		/* something is wrong */
1797 		edac_mc_printk(mci, KERN_ERR,
1798 			"INTERNAL ERROR: channel-b out of range "
1799 			"(%d >= %d)\n",
1800 			channelb, mci->csrows[csrow].nr_channels);
1801 		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1802 		return;
1803 	}
1804 
1805 	mci->ue_count++;
1806 	mci->csrows[csrow].ue_count++;
1807 
1808 	/* Generate the DIMM labels from the specified channels */
1809 	chars = snprintf(pos, len + 1, "%s",
1810 			 mci->csrows[csrow].channels[channela].label);
1811 	len -= chars; pos += chars;
1812 	chars = snprintf(pos, len + 1, "-%s",
1813 			 mci->csrows[csrow].channels[channelb].label);
1814 
1815 	if (log_ue)
1816 		edac_mc_printk(mci, KERN_EMERG,
1817 			"UE row %d, channel-a= %d channel-b= %d "
1818 			"labels \"%s\": %s\n", csrow, channela, channelb,
1819 			labels, msg);
1820 
1821 	if (panic_on_ue)
1822 		panic("UE row %d, channel-a= %d channel-b= %d "
1823 				"labels \"%s\": %s\n", csrow, channela,
1824 				channelb, labels, msg);
1825 }
1826 EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
1827 
1828 /*************************************************************
1829  * On Fully Buffered DIMM modules, this helper function is
1830  * called to process CE events
1831  */
1832 void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
1833 			   unsigned int csrow,
1834 			   unsigned int channel,
1835 			   char *msg)
1836 {
1837 
1838 	/* Ensure boundary values */
1839 	if (csrow >= mci->nr_csrows) {
1840 		/* something is wrong */
1841 		edac_mc_printk(mci, KERN_ERR,
1842 			"INTERNAL ERROR: row out of range (%d >= %d)\n",
1843 			csrow, mci->nr_csrows);
1844 		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1845 		return;
1846 	}
1847 	if (channel >= mci->csrows[csrow].nr_channels) {
1848 		/* something is wrong */
1849 		edac_mc_printk(mci, KERN_ERR,
1850 			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
1851 			channel, mci->csrows[csrow].nr_channels);
1852 		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1853 		return;
1854 	}
1855 
1856 	if (log_ce)
1857 		/* FIXME - put in DIMM location */
1858 		edac_mc_printk(mci, KERN_WARNING,
1859 			"CE row %d, channel %d, label \"%s\": %s\n",
1860 			csrow, channel,
1861 			mci->csrows[csrow].channels[channel].label,
1862 			msg);
1863 
1864 	mci->ce_count++;
1865 	mci->csrows[csrow].ce_count++;
1866 	mci->csrows[csrow].channels[channel].ce_count++;
1867 }
1868 EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
1869 
1870 
1871 /*
1872  * Iterate over all MC instances and check for ECC, et al, errors
1873  */
1874 static inline void check_mc_devices(void)
1875 {
1876 	struct list_head *item;
1877 	struct mem_ctl_info *mci;
1878 
1879 	debugf3("%s()\n", __func__);
1880 	down(&mem_ctls_mutex);
1881 
1882 	list_for_each(item, &mc_devices) {
1883 		mci = list_entry(item, struct mem_ctl_info, link);
1884 
1885 		if (mci->edac_check != NULL)
1886 			mci->edac_check(mci);
1887 	}
1888 
1889 	up(&mem_ctls_mutex);
1890 }
1891 
1892 /*
1893  * Check MC status every poll_msec.
1894  * Check PCI status every poll_msec as well.
1895  *
1896  * This is where the work gets done for edac.
1897  *
1898  * SMP safe, doesn't use NMI, and auto-rate-limits.
1899  */
1900 static void do_edac_check(void)
1901 {
1902 	debugf3("%s()\n", __func__);
1903 	check_mc_devices();
1904 	do_pci_parity_check();
1905 }
1906 
1907 static int edac_kernel_thread(void *arg)
1908 {
1909 	while (!kthread_should_stop()) {
1910 		do_edac_check();
1911 
1912 		/* go to sleep for the interval */
1913 		schedule_timeout_interruptible((HZ * poll_msec) / 1000);
1914 		try_to_freeze();
1915 	}
1916 
1917 	return 0;
1918 }
1919 
1920 /*
1921  * edac_mc_init
1922  *      module initialization entry point
1923  */
1924 static int __init edac_mc_init(void)
1925 {
1926 	edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
1927 
1928 	/*
1929 	 * Harvest and clear any boot/initialization PCI parity errors
1930 	 *
1931 	 * FIXME: This only clears errors logged by devices present at time of
1932 	 * 	module initialization.  We should also do an initial clear
1933 	 *	of each newly hotplugged device.
1934 	 */
1935 	clear_pci_parity_errors();
1936 
1937 	/* Create the MC sysfs entries */
1938 	if (edac_sysfs_memctrl_setup()) {
1939 		edac_printk(KERN_ERR, EDAC_MC,
1940 			"Error initializing sysfs code\n");
1941 		return -ENODEV;
1942 	}
1943 
1944 	/* Create the PCI parity sysfs entries */
1945 	if (edac_sysfs_pci_setup()) {
1946 		edac_sysfs_memctrl_teardown();
1947 		edac_printk(KERN_ERR, EDAC_MC,
1948 			"EDAC PCI: Error initializing sysfs code\n");
1949 		return -ENODEV;
1950 	}
1951 
1952 	/* create our kernel thread */
1953 	edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
1954 
1955 	if (IS_ERR(edac_thread)) {
1956 		/* remove the sysfs entries */
1957 		edac_sysfs_memctrl_teardown();
1958 		edac_sysfs_pci_teardown();
1959 		return PTR_ERR(edac_thread);
1960 	}
1961 
1962 	return 0;
1963 }
1964 
1965 /*
1966  * edac_mc_exit()
1967  *      module exit/termination function
1968  */
1969 static void __exit edac_mc_exit(void)
1970 {
1971 	debugf0("%s()\n", __func__);
1972 	kthread_stop(edac_thread);
1973 
1974 	/* tear down the sysfs device */
1975 	edac_sysfs_memctrl_teardown();
1976 	edac_sysfs_pci_teardown();
1977 }
1978 
1979 module_init(edac_mc_init);
1980 module_exit(edac_mc_exit);
1981 
1982 MODULE_LICENSE("GPL");
1983 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
1984 	"Based on work by Dan Hollis et al");
1985 MODULE_DESCRIPTION("Core library routines for MC reporting");
1986 
1987 module_param(panic_on_ue, int, 0644);
1988 MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
1989 #ifdef CONFIG_PCI
1990 module_param(check_pci_parity, int, 0644);
1991 MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
1992 module_param(panic_on_pci_parity, int, 0644);
1993 MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
1994 #endif
1995 module_param(log_ue, int, 0644);
1996 MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
1997 module_param(log_ce, int, 0644);
1998 MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
1999 module_param(poll_msec, int, 0644);
2000 MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
2001 #ifdef CONFIG_EDAC_DEBUG
2002 module_param(edac_debug_level, int, 0644);
2003 MODULE_PARM_DESC(edac_debug_level, "Debug level");
2004 #endif
2005