xref: /openbmc/linux/arch/powerpc/kernel/eeh_event.c (revision 2874c5fd)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously,
 *  in normal kernel context rather than interrupt context.  The
 *  routines below create an event, queue it onto a pending-event
 *  list, and let the eehd kernel thread drive recovery.
 */

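/*
 * The pending-event list is protected by eeh_eventlist_lock, taken with
 * interrupts disabled because events may be queued from interrupt
 * context.  eeh_eventlist_event is completed each time an event is
 * queued, waking the eehd thread.
 */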
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;
	struct eeh_pe *pe;

	while (!kthread_should_stop()) {
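		/*
		 * Sleep until __eeh_send_failure_event() queues an event
		 * and completes eeh_eventlist_event; bail out if the wait
		 * is interrupted by a signal.
		 */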
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event without a bound PE */
		pe = event->pe;
		if (pe) {
			if (pe->type & EEH_PE_PHB)
				pr_info("EEH: Detected error on PHB#%x\n",
					 pe->phb->global_number);
			else
				pr_info("EEH: Detected PCI bus error on "
					"PHB#%x-PE#%x\n",
					pe->phb->global_number, pe->addr);
			eeh_handle_normal_event(pe);
		} else {
			eeh_handle_special_event();
		}

		kfree(event);
	}

	return 0;
}

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
			__func__, ret);
		return ret;
	}

	return 0;
}

/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (by the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* Wake up the EEH daemon to process the event */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}
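
/*
 * For illustration only (not part of this file): a platform's error
 * interrupt handler is expected to call eeh_send_failure_event() and
 * return, leaving recovery to the eehd thread.  The helper names
 * hypothetical_phb_err_irq() and hypothetical_irq_to_pe() below are
 * made up for this sketch and do not exist in the kernel.
 *
 *	static irqreturn_t hypothetical_phb_err_irq(int irq, void *data)
 *	{
 *		struct eeh_pe *pe = hypothetical_irq_to_pe(data);
 *
 *		// Safe in hard-irq context: the event is only queued
 *		// here and handled later in process context by eehd.
 *		eeh_send_failure_event(pe);
 *
 *		return IRQ_HANDLED;
 *	}
 */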

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 * @force: Event will be removed unconditionally
 *
 * On the PowerNV platform, events that arrive after the first one may
 * simply be duplicates of the error already being handled.  Such
 * duplicate events are unnecessary and should be removed from the
 * queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * A NULL PE means either the IOC is dead or the caller is
	 * certain it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose PE has already been
	 * isolated are kept on the queue, so that they are not lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

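		/*
		 * Removal scope: a NULL PE flushes every queued event,
		 * a PHB PE drops all events on that PHB, and any other
		 * PE drops only the events bound to that exact PE.
		 */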
		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
206