/*
 * OPAL hypervisor Maintenance interrupt handling support in PowerNV.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright 2014 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/opal.h>
#include <asm/cputable.h>
#include <asm/machdep.h>

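/* Non-zero once the OPAL HMI message notifier has been registered. */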
static int opal_hmi_handler_nb_init;
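/* List node carrying a copy of one HMI event queued for deferred handling. */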
struct OpalHmiEvtNode {
	struct list_head list;
	struct OpalHMIEvent hmi_evt;
};
static LIST_HEAD(opal_hmi_evt_list);
static DEFINE_SPINLOCK(opal_hmi_evt_lock);

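/* Print an HMI event at a log level matching its severity. */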
static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
{
	const char *level, *sevstr, *error_info;
	static const char *hmi_error_types[] = {
		"Malfunction Alert",
		"Processor Recovery done",
		"Processor recovery occurred again",
		"Processor recovery occurred for masked error",
		"Timer facility experienced an error",
		"TFMR SPR is corrupted",
		"UPS (Uninterrupted Power System) Overflow indication",
		"An XSCOM operation failure",
		"An XSCOM operation completed",
		"SCOM has set a reserved FIR bit to cause recovery",
		"Debug trigger has set a reserved FIR bit to cause recovery",
		"A hypervisor resource error occurred"
	};

	/* Print the event details. */
	if (hmi_evt->version < OpalHMIEvt_V1) {
		pr_err("HMI Interrupt, Unknown event version %d!\n",
			hmi_evt->version);
		return;
	}
	switch (hmi_evt->severity) {
	case OpalHMI_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case OpalHMI_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "";
		break;
	case OpalHMI_SEV_ERROR_SYNC:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case OpalHMI_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	printk("%s%s Hypervisor Maintenance interrupt [%s]\n",
		level, sevstr,
		hmi_evt->disposition == OpalHMI_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");
	error_info = hmi_evt->type < ARRAY_SIZE(hmi_error_types) ?
			hmi_error_types[hmi_evt->type]
			: "Unknown";
	printk("%s Error detail: %s\n", level, error_info);
	printk("%s	HMER: %016llx\n", level, be64_to_cpu(hmi_evt->hmer));
	if ((hmi_evt->type == OpalHMI_ERROR_TFAC) ||
		(hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
		printk("%s	TFMR: %016llx\n", level,
						be64_to_cpu(hmi_evt->tfmr));
}

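/*
 * Work handler: drain the queued HMI events, print each one and panic
 * if OPAL reports that an event was not recovered.
 */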
static void hmi_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct OpalHMIEvent *hmi_evt;
	struct OpalHmiEvtNode *msg_node;
	uint8_t disposition;

	spin_lock_irqsave(&opal_hmi_evt_lock, flags);
	while (!list_empty(&opal_hmi_evt_list)) {
		msg_node = list_entry(opal_hmi_evt_list.next,
					   struct OpalHmiEvtNode, list);
		list_del(&msg_node->list);
		spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);

		hmi_evt = &msg_node->hmi_evt;
		print_hmi_event_info(hmi_evt);
		disposition = hmi_evt->disposition;
		kfree(msg_node);

		/*
		 * If OPAL could not recover from the HMI event, we cannot
		 * safely continue; invoke panic.
		 */
		if (disposition != OpalHMI_DISPOSITION_RECOVERED)
			panic("Unrecoverable HMI exception");

		spin_lock_irqsave(&opal_hmi_evt_lock, flags);
	}
	spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);
}

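/* Deferred work that processes queued HMI events in process context. */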
static DECLARE_WORK(hmi_event_work, hmi_event_handler);
/*
 * opal_handle_hmi_event - notifier handler that queues up HMI events
 * to be processed later.
 */
static int opal_handle_hmi_event(struct notifier_block *nb,
			  unsigned long msg_type, void *msg)
{
	unsigned long flags;
	struct OpalHMIEvent *hmi_evt;
	struct opal_msg *hmi_msg = msg;
	struct OpalHmiEvtNode *msg_node;

	/* Sanity check: only HMI event messages are handled here. */
	if (msg_type != OPAL_MSG_HMI_EVT)
		return 0;

	/* HMI event info starts at params[0] */
	hmi_evt = (struct OpalHMIEvent *)&hmi_msg->params[0];

	/* Defer the logging of HMI events to a workqueue. */
	msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
	if (!msg_node) {
		pr_err("HMI: out of memory, OPAL message event not handled\n");
		return -ENOMEM;
	}
	memcpy(&msg_node->hmi_evt, hmi_evt, sizeof(struct OpalHMIEvent));

	spin_lock_irqsave(&opal_hmi_evt_lock, flags);
	list_add(&msg_node->list, &opal_hmi_evt_list);
	spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);

	schedule_work(&hmi_event_work);
	return 0;
}

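/* Notifier block registered with the OPAL message infrastructure. */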
static struct notifier_block opal_hmi_handler_nb = {
	.notifier_call	= opal_handle_hmi_event,
	.next		= NULL,
	.priority	= 0,
};

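/*
 * Register the HMI event notifier with the OPAL message infrastructure.
 * Runs once as a subsystem initcall on PowerNV.
 */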
static int __init opal_hmi_handler_init(void)
{
	int ret;

	if (!opal_hmi_handler_nb_init) {
		ret = opal_message_notifier_register(
				OPAL_MSG_HMI_EVT, &opal_hmi_handler_nb);
		if (ret) {
			pr_err("%s: Can't register OPAL event notifier (%d)\n",
			       __func__, ret);
			return ret;
		}
		opal_hmi_handler_nb_init = 1;
	}
	return 0;
}
machine_subsys_initcall(powernv, opal_hmi_handler_init);