// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/kernel/eeh_event.c
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto an
 *  event list, where the eehd kernel thread can drive recovery.
 */
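
/*
 * Illustrative call flow (a simplified sketch, not an additional API):
 *
 *	error detected (often in IRQ/exception context)
 *	  -> eeh_send_failure_event(pe)		// queue event, wake eehd
 *	       eehd / eeh_event_handler()	// process context
 *	         -> eeh_handle_normal_event(pe)	// event bound to a PE
 *	         -> eeh_handle_special_event()	// event with no PE
 */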
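/*
 * Pending events live on eeh_eventlist, protected by eeh_eventlist_lock
 * (IRQ-safe, since producers may run in interrupt context).  The
 * eeh_eventlist_event completion is signalled to wake the eehd thread
 * whenever a new event is queued.
 */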
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

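	/*
	 * Sleep until a producer completes eeh_eventlist_event; an
	 * interrupting signal (or kthread_should_stop()) ends the
	 * loop and terminates the thread.
	 */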
	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* An event with no PE bound is handled as a special event */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
			__func__, ret);
		return ret;
	}

	return 0;
}

/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (from the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

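	/* GFP_ATOMIC: we may be in interrupt context and cannot sleep */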
	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
#ifdef CONFIG_STACKTRACE
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
					 ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

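	/*
	 * Note: list_add() queues at the head and eeh_event_handler()
	 * also dequeues from the head, so pending events are handled
	 * LIFO (most recent first).
	 */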
	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* Kick the EEH daemon to process the event */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: PE whose events should be removed, or NULL for all events
 * @force: Event will be removed unconditionally
 *
 * On the PowerNV platform, subsequent events may simply repeat an
 * earlier one that is still being handled. Such duplicated events
 * are unnecessary and should be removed from the queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * A NULL PE means either the IOC is dead or the caller is
	 * certain it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose PE has already been
	 * isolated are left on the queue so that they are not lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
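	/*
	 * Three cases: a NULL @pe removes every event, a PHB PE removes
	 * all events on that PHB, and any other PE removes only events
	 * bound to that exact PE.
	 */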
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}