xref: /openbmc/qemu/hw/ppc/spapr_events.c (revision 81fe70e443ef7e3b5e8f2e30336029ed5b968741)
1 /*
2  * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3  *
4  * RTAS events handling
5  *
6  * Copyright (c) 2012 David Gibson, IBM Corporation.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  *
26  */
27 
28 #include "qemu/osdep.h"
29 #include "qapi/error.h"
30 #include "cpu.h"
31 #include "sysemu/device_tree.h"
32 #include "sysemu/runstate.h"
33 
34 #include "hw/ppc/fdt.h"
35 #include "hw/ppc/spapr.h"
36 #include "hw/ppc/spapr_vio.h"
37 #include "hw/pci/pci.h"
38 #include "hw/irq.h"
39 #include "hw/pci-host/spapr.h"
40 #include "hw/ppc/spapr_drc.h"
41 #include "qemu/help_option.h"
42 #include "qemu/bcd.h"
43 #include "qemu/main-loop.h"
44 #include "hw/ppc/spapr_ovec.h"
45 #include <libfdt.h>
46 
/*
 * Layout of the 32-bit "summary" word of an RTAS error log (PAPR).
 * Version, severity, disposition, initiator, target and type are all
 * packed into this single big-endian word.
 */
#define RTAS_LOG_VERSION_MASK                   0xff000000
#define   RTAS_LOG_VERSION_6                    0x06000000
/* Severity of the reported condition */
#define RTAS_LOG_SEVERITY_MASK                  0x00e00000
#define   RTAS_LOG_SEVERITY_ALREADY_REPORTED    0x00c00000
#define   RTAS_LOG_SEVERITY_FATAL               0x00a00000
#define   RTAS_LOG_SEVERITY_ERROR               0x00800000
#define   RTAS_LOG_SEVERITY_ERROR_SYNC          0x00600000
#define   RTAS_LOG_SEVERITY_WARNING             0x00400000
#define   RTAS_LOG_SEVERITY_EVENT               0x00200000
#define   RTAS_LOG_SEVERITY_NO_ERROR            0x00000000
/* Recovery disposition after the event */
#define RTAS_LOG_DISPOSITION_MASK               0x00180000
#define   RTAS_LOG_DISPOSITION_FULLY_RECOVERED  0x00000000
#define   RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
#define   RTAS_LOG_DISPOSITION_NOT_RECOVERED    0x00100000
/* Set when an extended (version 6) log follows the fixed header */
#define RTAS_LOG_OPTIONAL_PART_PRESENT          0x00040000
/* Subsystem that initiated the event */
#define RTAS_LOG_INITIATOR_MASK                 0x0000f000
#define   RTAS_LOG_INITIATOR_UNKNOWN            0x00000000
#define   RTAS_LOG_INITIATOR_CPU                0x00001000
#define   RTAS_LOG_INITIATOR_PCI                0x00002000
#define   RTAS_LOG_INITIATOR_MEMORY             0x00004000
#define   RTAS_LOG_INITIATOR_HOTPLUG            0x00006000
/* Subsystem that was the target of the failed operation */
#define RTAS_LOG_TARGET_MASK                    0x00000f00
#define   RTAS_LOG_TARGET_UNKNOWN               0x00000000
#define   RTAS_LOG_TARGET_CPU                   0x00000100
#define   RTAS_LOG_TARGET_PCI                   0x00000200
#define   RTAS_LOG_TARGET_MEMORY                0x00000400
#define   RTAS_LOG_TARGET_HOTPLUG               0x00000600
/* General event / error type */
#define RTAS_LOG_TYPE_MASK                      0x000000ff
#define   RTAS_LOG_TYPE_OTHER                   0x00000000
#define   RTAS_LOG_TYPE_RETRY                   0x00000001
#define   RTAS_LOG_TYPE_TCE_ERR                 0x00000002
#define   RTAS_LOG_TYPE_INTERN_DEV_FAIL         0x00000003
#define   RTAS_LOG_TYPE_TIMEOUT                 0x00000004
#define   RTAS_LOG_TYPE_DATA_PARITY             0x00000005
#define   RTAS_LOG_TYPE_ADDR_PARITY             0x00000006
#define   RTAS_LOG_TYPE_CACHE_PARITY            0x00000007
#define   RTAS_LOG_TYPE_ADDR_INVALID            0x00000008
#define   RTAS_LOG_TYPE_ECC_UNCORR              0x00000009
#define   RTAS_LOG_TYPE_ECC_CORR                0x0000000a
#define   RTAS_LOG_TYPE_EPOW                    0x00000040
#define   RTAS_LOG_TYPE_HOTPLUG                 0x000000e5
88 
/*
 * Fixed header written at the front of every RTAS error log: the packed
 * summary word (see RTAS_LOG_* above) and the byte length of the
 * version-6 extended log that follows it.  All fields are big-endian
 * when presented to the guest.
 */
struct rtas_error_log {
    uint32_t summary;
    uint32_t extended_length;
} QEMU_PACKED;

/*
 * Header of a version-6 extended error log.  Flag bytes b0/b2 describe
 * validity, endianness and the overall log format; the remaining bytes
 * are reserved or platform-specific.
 */
struct rtas_event_log_v6 {
    uint8_t b0;
#define RTAS_LOG_V6_B0_VALID                          0x80
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR            0x40
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR              0x20
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION             0x10
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR               0x08
#define RTAS_LOG_V6_B0_NEW_LOG                        0x04
#define RTAS_LOG_V6_B0_BIGENDIAN                      0x02
    uint8_t _resv1;
    uint8_t b2;
#define RTAS_LOG_V6_B2_POWERPC_FORMAT                 0x80
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK                0x0f
#define   RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT    0x0e
    uint8_t _resv2[9];
    uint32_t company;
#define RTAS_LOG_V6_COMPANY_IBM                 0x49424d00 /* IBM<null> */
} QEMU_PACKED;

/*
 * Common header that starts every section inside a version-6 extended
 * log.  section_id is a two-character ASCII tag (e.g. "PH", "UH").
 */
struct rtas_event_log_v6_section_header {
    uint16_t section_id;
    uint16_t section_length;
    uint8_t section_version;
    uint8_t section_subtype;
    uint16_t creator_component_id;
} QEMU_PACKED;
120 
/* "Main-A" section: creation timestamp, creator id and section count. */
struct rtas_event_log_v6_maina {
#define RTAS_LOG_V6_SECTION_ID_MAINA                0x5048 /* PH */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t creation_date; /* BCD: YYYYMMDD */
    uint32_t creation_time; /* BCD: HHMMSS00 */
    uint8_t _platform1[8];
    char creator_id;        /* 'H' = hypervisor, see spapr_init_maina() */
    uint8_t _resv1[2];
    uint8_t section_count;  /* number of sections in this extended log */
    uint8_t _resv2[4];
    uint8_t _platform2[8];
    uint32_t plid;          /* platform log id */
    uint8_t _platform3[4];
} QEMU_PACKED;

/* "Main-B" section: subsystem, severity and action flags for the event. */
struct rtas_event_log_v6_mainb {
#define RTAS_LOG_V6_SECTION_ID_MAINB                0x5548 /* UH */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t subsystem_id;
    uint8_t _platform1;
    uint8_t event_severity;
    uint8_t event_subtype;
    uint8_t _platform2[4];
    uint8_t _resv1[2];
    uint16_t action_flags;
    uint8_t _resv2[4];
} QEMU_PACKED;

/*
 * EPOW (Environmental and POWer warning) section.  sensor_value encodes
 * the requested action, event_modifier/extended_modifier qualify it.
 */
struct rtas_event_log_v6_epow {
#define RTAS_LOG_V6_SECTION_ID_EPOW                 0x4550 /* EP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t sensor_value;
#define RTAS_LOG_V6_EPOW_ACTION_RESET                    0
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING             1
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER               2
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN          3
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT              4
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE           5
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF                7
    uint8_t event_modifier;
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL                 1
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS                 2
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL               3
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE            4
    uint8_t extended_modifier;
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE           0
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC    1
    uint8_t _resv;
    uint64_t reason_code;
} QEMU_PACKED;

/* Complete extended log emitted for an EPOW (e.g. powerdown) event. */
struct epow_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;
178 
/*
 * Identifier for a dynamic reconfiguration connector (DRC).  Which
 * member is meaningful depends on the RTAS_LOG_V6_HP_ID_* value carried
 * in the hotplug section's hotplug_identifier field.
 */
union drc_identifier {
    uint32_t index;
    uint32_t count;
    struct {
        uint32_t count;
        uint32_t index;
    } count_indexed;
    char name[1];   /* variable-length DRC name (ID_DRC_NAME) */
} QEMU_PACKED;

/* Hotplug section: what kind of resource, what action, and its DRC id. */
struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG              0x4850 /* HP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t hotplug_type;
#define RTAS_LOG_V6_HP_TYPE_CPU                          1
#define RTAS_LOG_V6_HP_TYPE_MEMORY                       2
#define RTAS_LOG_V6_HP_TYPE_SLOT                         3
#define RTAS_LOG_V6_HP_TYPE_PHB                          4
#define RTAS_LOG_V6_HP_TYPE_PCI                          5
    uint8_t hotplug_action;
#define RTAS_LOG_V6_HP_ACTION_ADD                        1
#define RTAS_LOG_V6_HP_ACTION_REMOVE                     2
    uint8_t hotplug_identifier;
#define RTAS_LOG_V6_HP_ID_DRC_NAME                       1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX                      2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT                      3
#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED              4
    uint8_t reserved;
    union drc_identifier drc_id;
} QEMU_PACKED;

/* Complete extended log emitted for a hotplug event. */
struct hp_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;
216 
/*
 * Machine-check section: error type/subtype plus the effective and
 * logical addresses involved, filled in by spapr_mce_get_elog_type().
 */
struct rtas_event_log_v6_mc {
#define RTAS_LOG_V6_SECTION_ID_MC                   0x4D43 /* MC */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t fru_id;
    uint32_t proc_id;
    uint8_t error_type;
#define RTAS_LOG_V6_MC_TYPE_UE                           0
#define RTAS_LOG_V6_MC_TYPE_SLB                          1
#define RTAS_LOG_V6_MC_TYPE_ERAT                         2
#define RTAS_LOG_V6_MC_TYPE_TLB                          4
#define RTAS_LOG_V6_MC_TYPE_D_CACHE                      5
#define RTAS_LOG_V6_MC_TYPE_I_CACHE                      7
    uint8_t sub_err_type;  /* meaning depends on error_type, see below */
#define RTAS_LOG_V6_MC_UE_INDETERMINATE                  0
#define RTAS_LOG_V6_MC_UE_IFETCH                         1
#define RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_IFETCH         2
#define RTAS_LOG_V6_MC_UE_LOAD_STORE                     3
#define RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_LOAD_STORE     4
#define RTAS_LOG_V6_MC_SLB_PARITY                        0
#define RTAS_LOG_V6_MC_SLB_MULTIHIT                      1
#define RTAS_LOG_V6_MC_SLB_INDETERMINATE                 2
#define RTAS_LOG_V6_MC_ERAT_PARITY                       1
#define RTAS_LOG_V6_MC_ERAT_MULTIHIT                     2
#define RTAS_LOG_V6_MC_ERAT_INDETERMINATE                3
#define RTAS_LOG_V6_MC_TLB_PARITY                        1
#define RTAS_LOG_V6_MC_TLB_MULTIHIT                      2
#define RTAS_LOG_V6_MC_TLB_INDETERMINATE                 3
    uint8_t reserved_1[6];
    uint64_t effective_address;
    uint64_t logical_address;
} QEMU_PACKED;

/* Complete extended log emitted for a machine-check event. */
struct mc_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_mc mc;
} QEMU_PACKED;
253 
/*
 * Decoding table for machine checks taken on the instruction side:
 * match SRR1 against (srr1_mask, srr1_value) to classify the error.
 */
struct MC_ierror_table {
    unsigned long srr1_mask;
    unsigned long srr1_value;
    bool nip_valid; /* nip is a valid indicator of faulting address */
    uint8_t error_type;
    uint8_t error_subtype;
    unsigned int initiator;
    unsigned int severity;
};

/* Entries are checked in order; first match wins (see
 * spapr_mce_get_elog_type()). */
static const struct MC_ierror_table mc_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_IFETCH,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_PARITY,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  RTAS_LOG_V6_MC_TYPE_ERAT, RTAS_LOG_V6_MC_ERAT_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  RTAS_LOG_V6_MC_TYPE_TLB, RTAS_LOG_V6_MC_TLB_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_IFETCH,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, } };
283 
/*
 * Decoding table for machine checks taken on the data side: match
 * individual DSISR bits to classify the error.
 */
struct MC_derror_table {
    unsigned long dsisr_value;
    bool dar_valid; /* dar is a valid indicator of faulting address */
    uint8_t error_type;
    uint8_t error_subtype;
    unsigned int initiator;
    unsigned int severity;
};

/* Entries are checked in order; first matching DSISR bit wins. */
static const struct MC_derror_table mc_derror_table[] = {
{ 0x00008000, false,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_LOAD_STORE,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00004000, true,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_LOAD_STORE,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000800, true,
  RTAS_LOG_V6_MC_TYPE_ERAT, RTAS_LOG_V6_MC_ERAT_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000400, true,
  RTAS_LOG_V6_MC_TYPE_TLB, RTAS_LOG_V6_MC_TLB_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000080, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_MULTIHIT,  /* Before PARITY */
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000100, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_PARITY,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, } };

/* SRR1 bit 42 set => the machine check was on a load/store (data side) */
#define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
314 
/*
 * Interrupt source classes for RTAS events; each enabled class gets its
 * own entry under the "event-sources" device tree node.
 */
typedef enum EventClass {
    EVENT_CLASS_INTERNAL_ERRORS     = 0,
    EVENT_CLASS_EPOW                = 1,
    EVENT_CLASS_RESERVED            = 2,
    EVENT_CLASS_HOT_PLUG            = 3,
    EVENT_CLASS_IO                  = 4,
    EVENT_CLASS_MAX
} EventClassIndex;
/*
 * Bit mask for one event class, MSB-first (class 0 -> bit 31).
 * The argument is parenthesized so expression arguments expand
 * correctly, and the literal is unsigned because 1 << 31 on a signed
 * 32-bit int is undefined behavior (hit for EVENT_CLASS_INTERNAL_ERRORS).
 */
#define EVENT_CLASS_MASK(index) (1U << (31 - (index)))
324 
/*
 * Device tree node names for each event class, indexed by
 * EventClassIndex.  EVENT_CLASS_RESERVED deliberately has no name and
 * stays NULL (it is never enabled).
 */
static const char * const event_names[EVENT_CLASS_MAX] = {
    [EVENT_CLASS_INTERNAL_ERRORS]       = "internal-errors",
    [EVENT_CLASS_EPOW]                  = "epow-events",
    [EVENT_CLASS_HOT_PLUG]              = "hot-plug-events",
    [EVENT_CLASS_IO]                    = "ibm,io-events",
};
331 
/* Per-class interrupt source: its irq, its class mask and whether it is
 * advertised to the guest. */
struct SpaprEventSource {
    int irq;
    uint32_t mask;      /* EVENT_CLASS_MASK() of the class index */
    bool enabled;
};
337 
/* Allocate a zero-initialized event-source table, one slot per class. */
static SpaprEventSource *spapr_event_sources_new(void)
{
    return g_new0(SpaprEventSource, EVENT_CLASS_MAX);
}
342 
/*
 * Bind @irq to the event class @index and mark it enabled.  Registering
 * a class twice is a programming error (asserted below).
 */
static void spapr_event_sources_register(SpaprEventSource *event_sources,
                                        EventClassIndex index, int irq)
{
    /* we only support 1 irq per event class at the moment */
    g_assert(event_sources);
    g_assert(!event_sources[index].enabled);
    event_sources[index].irq = irq;
    event_sources[index].mask = EVENT_CLASS_MASK(index);
    event_sources[index].enabled = true;
}
353 
/*
 * Return the source slot for class @index.  Never NULL, but the slot
 * may have enabled == false; callers must check.
 */
static const SpaprEventSource *
spapr_event_sources_get_source(SpaprEventSource *event_sources,
                               EventClassIndex index)
{
    g_assert(index < EVENT_CLASS_MAX);
    g_assert(event_sources);

    return &event_sources[index];
}
363 
/*
 * Build the "event-sources" node of the guest device tree: one child
 * node per enabled event class carrying its interrupt specifier, plus
 * the aggregate interrupt-ranges property on the parent.
 */
void spapr_dt_events(SpaprMachineState *spapr, void *fdt)
{
    uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
    int i, count = 0, event_sources;
    SpaprEventSource *events = spapr->event_sources;

    g_assert(events);

    _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));

    for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
        int node_offset;
        uint32_t interrupts[2];
        const SpaprEventSource *source =
            spapr_event_sources_get_source(events, i);
        const char *source_name = event_names[i];

        if (!source->enabled) {
            continue;
        }

        /* presumably spapr_dt_irq() fills the cells already in
         * big-endian cell format — interrupts[0] is reused verbatim in
         * interrupt-ranges below. TODO confirm against spapr_irq code. */
        spapr_dt_irq(interrupts, source->irq, false);

        _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
        _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
                         sizeof(interrupts)));

        /* interrupt-ranges entry: (first irq, number of irqs == 1) */
        irq_ranges[count++] = interrupts[0];
        irq_ranges[count++] = cpu_to_be32(1);
    }

    _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
    _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
    _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
                      irq_ranges, count * sizeof(uint32_t))));
}
400 
/*
 * Map an RTAS log type to the interrupt source that should deliver it.
 *
 * Hotplug events use the dedicated hotplug source only when the guest
 * negotiated OV5_HP_EVT at CAS; otherwise they fall back to the legacy
 * EPOW source.  Returns NULL for log types with no known source.
 */
static const SpaprEventSource *
rtas_event_log_to_source(SpaprMachineState *spapr, int log_type)
{
    const SpaprEventSource *source;

    g_assert(spapr->event_sources);

    switch (log_type) {
    case RTAS_LOG_TYPE_HOTPLUG:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_HOT_PLUG);
        if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
            g_assert(source->enabled);
            break;
        }
        /* fall through back to epow for legacy hotplug interrupt source */
    case RTAS_LOG_TYPE_EPOW:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_EPOW);
        break;
    default:
        source = NULL;
    }

    return source;
}
427 
/*
 * Resolve the irq used to signal @log_type.  Only valid for log types
 * that have an enabled source (asserted).
 */
static int rtas_event_log_to_irq(SpaprMachineState *spapr, int log_type)
{
    const SpaprEventSource *source;

    source = rtas_event_log_to_source(spapr, log_type);
    g_assert(source);
    g_assert(source->enabled);

    return source->irq;
}
438 
/* Extract the RTAS_LOG_TYPE_* field from a queued entry's summary word. */
static uint32_t spapr_event_log_entry_type(SpaprEventLogEntry *entry)
{
    return entry->summary & RTAS_LOG_TYPE_MASK;
}
443 
/* Append @entry to the machine's pending-event queue (takes ownership). */
static void rtas_event_log_queue(SpaprMachineState *spapr,
                                 SpaprEventLogEntry *entry)
{
    QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
}
449 
/*
 * Remove and return the oldest pending event whose source matches
 * @event_mask, or NULL if none matches.  Caller takes ownership of the
 * returned entry.
 */
static SpaprEventLogEntry *rtas_event_log_dequeue(SpaprMachineState *spapr,
                                                  uint32_t event_mask)
{
    SpaprEventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const SpaprEventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        g_assert(source);
        if (source->mask & event_mask) {
            break;
        }
    }

    /* QTAILQ_FOREACH leaves entry == NULL when the list is exhausted
     * without a break, so this only removes an actual match. */
    if (entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
    }

    return entry;
}
472 
473 static bool rtas_event_log_contains(uint32_t event_mask)
474 {
475     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
476     SpaprEventLogEntry *entry = NULL;
477 
478     QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
479         const SpaprEventSource *source =
480             rtas_event_log_to_source(spapr,
481                                      spapr_event_log_entry_type(entry));
482 
483         if (source->mask & event_mask) {
484             return true;
485         }
486     }
487 
488     return false;
489 }
490 
/* Monotonic platform log id, bumped once per generated extended log. */
static uint32_t next_plid;
492 
/* Fill in the common version-6 extended log header (valid, new,
 * big-endian, PowerPC platform-event format, IBM company id). */
static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
{
    v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
        | RTAS_LOG_V6_B0_BIGENDIAN;
    v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
        | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
    v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}
501 
/*
 * Fill in a Main-A section: BCD-encoded creation date/time taken from
 * the machine's RTC, creator id, section count and a fresh platform
 * log id.
 */
static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
                             int section_count)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct tm tm;
    int year;

    maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
    maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
    /* FIXME: section version, subtype and creator id? */
    spapr_rtc_read(&spapr->rtc, &tm, NULL);
    year = tm.tm_year + 1900;
    maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
                                       | (to_bcd(year % 100) << 16)
                                       | (to_bcd(tm.tm_mon + 1) << 8)
                                       | to_bcd(tm.tm_mday));
    maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
                                       | (to_bcd(tm.tm_min) << 16)
                                       | (to_bcd(tm.tm_sec) << 8));
    maina->creator_id = 'H'; /* Hypervisor */
    maina->section_count = section_count;
    /* NOTE(review): plid is stored without cpu_to_be32(), unlike the
     * other multi-byte fields — verify whether guests consume it. */
    maina->plid = next_plid++;
}
525 
/*
 * Powerdown notifier: queue an EPOW "system shutdown" event log and
 * pulse the EPOW interrupt so the guest initiates an orderly shutdown.
 * The queued entry (and its extended log) is owned by the pending-event
 * queue until the guest consumes it via check-exception.
 */
static void spapr_powerdown_req(Notifier *n, void *opaque)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_epow *epow;
    struct epow_extended_log *new_epow;

    entry = g_new(SpaprEventLogEntry, 1);
    new_epow = g_malloc0(sizeof(*new_epow));
    entry->extended_log = new_epow;

    v6hdr = &new_epow->v6hdr;
    maina = &new_epow->maina;
    mainb = &new_epow->mainb;
    epow = &new_epow->epow;

    entry->summary = RTAS_LOG_VERSION_6
                       | RTAS_LOG_SEVERITY_EVENT
                       | RTAS_LOG_DISPOSITION_NOT_RECOVERED
                       | RTAS_LOG_OPTIONAL_PART_PRESENT
                       | RTAS_LOG_TYPE_EPOW;
    entry->extended_length = sizeof(*new_epow);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* FIXME: section version, subtype and creator id? */
    mainb->subsystem_id = 0xa0; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0xd0; /* Normal shutdown */

    epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
    epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
    epow->hdr.section_version = 2; /* includes extended modifier */
    /* FIXME: section subtype and creator id? */
    epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
    epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
    epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW)));
}
575 
/*
 * Queue a hotplug event log for the DRC identified by (@hp_id, @drc_id)
 * and pulse the appropriate interrupt.  rtas_event_log_to_irq() picks
 * the dedicated hotplug irq when the guest negotiated OV5_HP_EVT,
 * otherwise the legacy EPOW irq.
 *
 * @hp_id:     RTAS_LOG_V6_HP_ID_* — which member of @drc_id is valid
 * @hp_action: RTAS_LOG_V6_HP_ACTION_ADD or _REMOVE
 * @drc_type:  connector type, mapped to RTAS_LOG_V6_HP_TYPE_* below
 */
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
                                    SpaprDrcType drc_type,
                                    union drc_identifier *drc_id)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry;
    struct hp_extended_log *new_hp;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_hp *hp;

    entry = g_new(SpaprEventLogEntry, 1);
    new_hp = g_malloc0(sizeof(struct hp_extended_log));
    entry->extended_log = new_hp;

    v6hdr = &new_hp->v6hdr;
    maina = &new_hp->maina;
    mainb = &new_hp->mainb;
    hp = &new_hp->hp;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_INITIATOR_HOTPLUG
        | RTAS_LOG_TYPE_HOTPLUG;
    entry->extended_length = sizeof(*new_hp);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* NOTE(review): 0x80 differs from the EPOW path's 0xa0 even though
     * both carried the same comment — confirm intended subsystem id. */
    mainb->subsystem_id = 0x80;
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0x00;  /* (not a shutdown; generic subtype) */

    hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
    hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
    hp->hdr.section_version = 1; /* includes extended modifier */
    hp->hotplug_action = hp_action;
    hp->hotplug_identifier = hp_id;

    switch (drc_type) {
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_PHB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PHB;
        break;
    default:
        /* we shouldn't be signaling hotplug events for resources
         * that don't support them
         */
        g_assert(false);
        return;
    }

    if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
        hp->drc_id.count = cpu_to_be32(drc_id->count);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
        hp->drc_id.index = cpu_to_be32(drc_id->index);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
        /* we should not be using count_indexed value unless the guest
         * supports dedicated hotplug event source
         */
        g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
        hp->drc_id.count_indexed.count =
            cpu_to_be32(drc_id->count_indexed.count);
        hp->drc_id.count_indexed.index =
            cpu_to_be32(drc_id->count_indexed.index);
    }

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_HOTPLUG)));
}
661 
662 void spapr_hotplug_req_add_by_index(SpaprDrc *drc)
663 {
664     SpaprDrcType drc_type = spapr_drc_type(drc);
665     union drc_identifier drc_id;
666 
667     drc_id.index = spapr_drc_index(drc);
668     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
669                             RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
670 }
671 
672 void spapr_hotplug_req_remove_by_index(SpaprDrc *drc)
673 {
674     SpaprDrcType drc_type = spapr_drc_type(drc);
675     union drc_identifier drc_id;
676 
677     drc_id.index = spapr_drc_index(drc);
678     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
679                             RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
680 }
681 
682 void spapr_hotplug_req_add_by_count(SpaprDrcType drc_type,
683                                        uint32_t count)
684 {
685     union drc_identifier drc_id;
686 
687     drc_id.count = count;
688     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
689                             RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
690 }
691 
692 void spapr_hotplug_req_remove_by_count(SpaprDrcType drc_type,
693                                           uint32_t count)
694 {
695     union drc_identifier drc_id;
696 
697     drc_id.count = count;
698     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
699                             RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
700 }
701 
702 void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type,
703                                             uint32_t count, uint32_t index)
704 {
705     union drc_identifier drc_id;
706 
707     drc_id.count_indexed.count = count;
708     drc_id.count_indexed.index = index;
709     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
710                             RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
711 }
712 
713 void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type,
714                                                uint32_t count, uint32_t index)
715 {
716     union drc_identifier drc_id;
717 
718     drc_id.count_indexed.count = count;
719     drc_id.count_indexed.index = index;
720     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
721                             RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
722 }
723 
/*
 * Classify a machine check and fill in the MC section of @ext_elog.
 *
 * Data-side machine checks (SRR1 load/store bit set) are decoded from
 * DSISR via mc_derror_table; instruction-side ones from SRR1 via
 * mc_ierror_table.  The faulting effective address is recorded from
 * DAR or NIP when the matched entry says it is valid.
 *
 * Returns the summary word for the rtas_error_log header; if no table
 * entry matches, only version/disposition/initiator bits are set.
 */
static uint32_t spapr_mce_get_elog_type(PowerPCCPU *cpu, bool recovered,
                                        struct mc_extended_log *ext_elog)
{
    int i;
    CPUPPCState *env = &cpu->env;
    uint32_t summary;
    uint64_t dsisr = env->spr[SPR_DSISR];

    summary = RTAS_LOG_VERSION_6 | RTAS_LOG_OPTIONAL_PART_PRESENT;
    if (recovered) {
        summary |= RTAS_LOG_DISPOSITION_FULLY_RECOVERED;
    } else {
        summary |= RTAS_LOG_DISPOSITION_NOT_RECOVERED;
    }

    if (SRR1_MC_LOADSTORE(env->spr[SPR_SRR1])) {
        /* data-side: first matching DSISR bit wins */
        for (i = 0; i < ARRAY_SIZE(mc_derror_table); i++) {
            if (!(dsisr & mc_derror_table[i].dsisr_value)) {
                continue;
            }

            ext_elog->mc.error_type = mc_derror_table[i].error_type;
            ext_elog->mc.sub_err_type = mc_derror_table[i].error_subtype;
            if (mc_derror_table[i].dar_valid) {
                ext_elog->mc.effective_address = cpu_to_be64(env->spr[SPR_DAR]);
            }

            summary |= mc_derror_table[i].initiator
                        | mc_derror_table[i].severity;

            return summary;
        }
    } else {
        /* instruction-side: match SRR1 against mask/value pairs */
        for (i = 0; i < ARRAY_SIZE(mc_ierror_table); i++) {
            if ((env->spr[SPR_SRR1] & mc_ierror_table[i].srr1_mask) !=
                    mc_ierror_table[i].srr1_value) {
                continue;
            }

            ext_elog->mc.error_type = mc_ierror_table[i].error_type;
            ext_elog->mc.sub_err_type = mc_ierror_table[i].error_subtype;
            if (mc_ierror_table[i].nip_valid) {
                ext_elog->mc.effective_address = cpu_to_be64(env->nip);
            }

            summary |= mc_ierror_table[i].initiator
                        | mc_ierror_table[i].severity;

            return summary;
        }
    }

    /* unrecognized machine check: report only the initiator */
    summary |= RTAS_LOG_INITIATOR_CPU;
    return summary;
}
779 
/*
 * Build the machine-check error log, copy it into guest memory at the
 * RTAS error-log area, and redirect the vCPU to the FWNMI machine-check
 * handler the guest registered (guest_machine_check_addr).
 *
 * Guest memory layout written at rtas_addr + RTAS_ERROR_LOG_OFFSET:
 *   [ saved r3 ][ struct rtas_error_log ][ struct mc_extended_log ]
 * r3 is then repointed at that area, per the FWNMI convention.
 *
 * If the RTAS base address cannot be found in the fdt, the guest is
 * reset instead — there is nowhere safe to deliver the log.
 */
static void spapr_mce_dispatch_elog(PowerPCCPU *cpu, bool recovered)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    CPUState *cs = CPU(cpu);
    uint64_t rtas_addr;
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong msr = 0;
    struct rtas_error_log log;
    struct mc_extended_log *ext_elog;
    uint32_t summary;

    /*
     * Properly set bits in MSR before we invoke the handler.
     * SRR0/1, DAR and DSISR are properly set by KVM
     */
    if (!(*pcc->interrupts_big_endian)(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    if (env->msr & (1ULL << MSR_SF)) {
        msr |= (1ULL << MSR_SF);
    }

    /* machine checks must stay enabled in the handler */
    msr |= (1ULL << MSR_ME);

    ext_elog = g_malloc0(sizeof(*ext_elog));
    summary = spapr_mce_get_elog_type(cpu, recovered, ext_elog);

    log.summary = cpu_to_be32(summary);
    log.extended_length = cpu_to_be32(sizeof(*ext_elog));

    spapr_init_v6hdr(&ext_elog->v6hdr);
    ext_elog->mc.hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MC);
    ext_elog->mc.hdr.section_length =
                    cpu_to_be16(sizeof(struct rtas_event_log_v6_mc));
    ext_elog->mc.hdr.section_version = 1;

    /* get rtas addr from fdt */
    rtas_addr = spapr_get_rtas_addr();
    if (!rtas_addr) {
        /* Unable to fetch rtas_addr. Hence reset the guest */
        ppc_cpu_do_system_reset(cs);
        g_free(ext_elog);
        return;
    }

    /* save the guest's r3 ahead of the log, then write the log itself */
    stq_be_phys(&address_space_memory, rtas_addr + RTAS_ERROR_LOG_OFFSET,
                env->gpr[3]);
    cpu_physical_memory_write(rtas_addr + RTAS_ERROR_LOG_OFFSET +
                              sizeof(env->gpr[3]), &log, sizeof(log));
    cpu_physical_memory_write(rtas_addr + RTAS_ERROR_LOG_OFFSET +
                              sizeof(env->gpr[3]) + sizeof(log), ext_elog,
                              sizeof(*ext_elog));

    /* enter the guest's FWNMI handler with r3 pointing at the log */
    env->gpr[3] = rtas_addr + RTAS_ERROR_LOG_OFFSET;
    env->msr = msr;
    env->nip = spapr->guest_machine_check_addr;

    g_free(ext_elog);
}
841 
/*
 * Entry point for injecting a machine check into the guest.
 *
 * If the guest has registered for FWNMI ("ibm,nmi-register"), delivery is
 * serialized: only one vCPU may be in the guest's machine-check handler at
 * a time (tracked by spapr->mc_status, -1 when free, otherwise the vcpu_id
 * of the CPU currently handling an error). Other vCPUs block on
 * mc_delivery_cond until the handler calls "ibm,nmi-interlock".
 *
 * @recovered: whether the error was recovered (propagated into the log).
 */
void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    CPUState *cs = CPU(cpu);

    if (spapr->guest_machine_check_addr == -1) {
        /*
         * This implies that we have hit a machine check either when the
         * guest has not registered FWNMI (i.e., "ibm,nmi-register" not
         * called) or between system reset and "ibm,nmi-register".
         * Fall back to the old machine check behavior in such cases.
         */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        ppc_cpu_do_interrupt(cs);
        return;
    }

    /* Wait for the delivery slot; loop because the wakeup may be spurious */
    while (spapr->mc_status != -1) {
        /*
         * Check whether the same CPU got machine check error
         * while still handling the mc error (i.e., before
         * that CPU called "ibm,nmi-interlock")
         */
        if (spapr->mc_status == cpu->vcpu_id) {
            /* Nested MC on the same vCPU is unrecoverable */
            qemu_system_guest_panicked(NULL);
            return;
        }
        qemu_cond_wait_iothread(&spapr->mc_delivery_cond);
        /* Meanwhile if the system is reset, then just return */
        if (spapr->guest_machine_check_addr == -1) {
            return;
        }
    }
    /* Claim the slot; released by "ibm,nmi-interlock" */
    spapr->mc_status = cpu->vcpu_id;

    spapr_mce_dispatch_elog(cpu, recovered);
}
879 
880 static void check_exception(PowerPCCPU *cpu, SpaprMachineState *spapr,
881                             uint32_t token, uint32_t nargs,
882                             target_ulong args,
883                             uint32_t nret, target_ulong rets)
884 {
885     uint32_t mask, buf, len, event_len;
886     uint64_t xinfo;
887     SpaprEventLogEntry *event;
888     struct rtas_error_log header;
889     int i;
890 
891     if ((nargs < 6) || (nargs > 7) || nret != 1) {
892         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
893         return;
894     }
895 
896     xinfo = rtas_ld(args, 1);
897     mask = rtas_ld(args, 2);
898     buf = rtas_ld(args, 4);
899     len = rtas_ld(args, 5);
900     if (nargs == 7) {
901         xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
902     }
903 
904     event = rtas_event_log_dequeue(spapr, mask);
905     if (!event) {
906         goto out_no_events;
907     }
908 
909     event_len = event->extended_length + sizeof(header);
910 
911     if (event_len < len) {
912         len = event_len;
913     }
914 
915     header.summary = cpu_to_be32(event->summary);
916     header.extended_length = cpu_to_be32(event->extended_length);
917     cpu_physical_memory_write(buf, &header, sizeof(header));
918     cpu_physical_memory_write(buf + sizeof(header), event->extended_log,
919                               event->extended_length);
920     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
921     g_free(event->extended_log);
922     g_free(event);
923 
924     /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
925      * there are still pending events to be fetched via check-exception. We
926      * do the latter here, since our code relies on edge-triggered
927      * interrupts.
928      */
929     for (i = 0; i < EVENT_CLASS_MAX; i++) {
930         if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
931             const SpaprEventSource *source =
932                 spapr_event_sources_get_source(spapr->event_sources, i);
933 
934             g_assert(source->enabled);
935             qemu_irq_pulse(spapr_qirq(spapr, source->irq));
936         }
937     }
938 
939     return;
940 
941 out_no_events:
942     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
943 }
944 
945 static void event_scan(PowerPCCPU *cpu, SpaprMachineState *spapr,
946                        uint32_t token, uint32_t nargs,
947                        target_ulong args,
948                        uint32_t nret, target_ulong rets)
949 {
950     if (nargs != 4 || nret != 1) {
951         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
952         return;
953     }
954     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
955 }
956 
957 void spapr_clear_pending_events(SpaprMachineState *spapr)
958 {
959     SpaprEventLogEntry *entry = NULL, *next_entry;
960 
961     QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) {
962         QTAILQ_REMOVE(&spapr->pending_events, entry, next);
963         g_free(entry->extended_log);
964         g_free(entry);
965     }
966 }
967 
968 void spapr_events_init(SpaprMachineState *spapr)
969 {
970     int epow_irq = SPAPR_IRQ_EPOW;
971 
972     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
973         epow_irq = spapr_irq_findone(spapr, &error_fatal);
974     }
975 
976     spapr_irq_claim(spapr, epow_irq, false, &error_fatal);
977 
978     QTAILQ_INIT(&spapr->pending_events);
979 
980     spapr->event_sources = spapr_event_sources_new();
981 
982     spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
983                                  epow_irq);
984 
985     /* NOTE: if machine supports modern/dedicated hotplug event source,
986      * we add it to the device-tree unconditionally. This means we may
987      * have cases where the source is enabled in QEMU, but unused by the
988      * guest because it does not support modern hotplug events, so we
989      * take care to rely on checking for negotiation of OV5_HP_EVT option
990      * before attempting to use it to signal events, rather than simply
991      * checking that it's enabled.
992      */
993     if (spapr->use_hotplug_event_source) {
994         int hp_irq = SPAPR_IRQ_HOTPLUG;
995 
996         if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
997             hp_irq = spapr_irq_findone(spapr, &error_fatal);
998         }
999 
1000         spapr_irq_claim(spapr, hp_irq, false, &error_fatal);
1001 
1002         spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
1003                                      hp_irq);
1004     }
1005 
1006     spapr->epow_notifier.notify = spapr_powerdown_req;
1007     qemu_register_powerdown_notifier(&spapr->epow_notifier);
1008     spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
1009                         check_exception);
1010     spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
1011 }
1012