/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * RTAS events handling
 *
 * Copyright (c) 2012 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "hw/qdev.h"
#include "sysemu/device_tree.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/spapr_drc.h"
#include "qemu/help_option.h"
#include "qemu/bcd.h"
#include "hw/ppc/spapr_ovec.h"
#include <libfdt.h>

#define RTAS_LOG_VERSION_MASK 0xff000000
#define RTAS_LOG_VERSION_6 0x06000000
#define RTAS_LOG_SEVERITY_MASK 0x00e00000
#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
#define RTAS_LOG_SEVERITY_ERROR 0x00800000
#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
#define RTAS_LOG_SEVERITY_WARNING 0x00400000
#define RTAS_LOG_SEVERITY_EVENT 0x00200000
#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
#define RTAS_LOG_DISPOSITION_MASK 0x00180000
#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
#define RTAS_LOG_INITIATOR_MASK 0x0000f000
#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
#define RTAS_LOG_INITIATOR_CPU 0x00001000
#define RTAS_LOG_INITIATOR_PCI 0x00002000
#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
#define RTAS_LOG_TARGET_MASK 0x00000f00
#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
#define RTAS_LOG_TARGET_CPU 0x00000100
#define RTAS_LOG_TARGET_PCI 0x00000200
#define RTAS_LOG_TARGET_MEMORY 0x00000400
#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
#define RTAS_LOG_TYPE_MASK 0x000000ff
#define RTAS_LOG_TYPE_OTHER 0x00000000
#define RTAS_LOG_TYPE_RETRY 0x00000001
#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
#define RTAS_LOG_TYPE_EPOW 0x00000040
#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5

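/*
 * Fixed header of an RTAS error/event log as copied to the guest by
 * check-exception: "summary" packs the RTAS_LOG_* version, severity,
 * disposition, initiator, target and type fields defined above, and
 * extended_length is the size of the version 6 extended log that follows
 * this header in the guest buffer.
 */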
struct rtas_error_log {
    uint32_t summary;
    uint32_t extended_length;
} QEMU_PACKED;

struct rtas_event_log_v6 {
    uint8_t b0;
#define RTAS_LOG_V6_B0_VALID 0x80
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
#define RTAS_LOG_V6_B0_NEW_LOG 0x04
#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
    uint8_t _resv1;
    uint8_t b2;
#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
    uint8_t _resv2[9];
    uint32_t company;
#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM<null> */
} QEMU_PACKED;

struct rtas_event_log_v6_section_header {
    uint16_t section_id;
    uint16_t section_length;
    uint8_t section_version;
    uint8_t section_subtype;
    uint16_t creator_component_id;
} QEMU_PACKED;

struct rtas_event_log_v6_maina {
#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t creation_date; /* BCD: YYYYMMDD */
    uint32_t creation_time; /* BCD: HHMMSS00 */
    uint8_t _platform1[8];
    char creator_id;
    uint8_t _resv1[2];
    uint8_t section_count;
    uint8_t _resv2[4];
    uint8_t _platform2[8];
    uint32_t plid;
    uint8_t _platform3[4];
} QEMU_PACKED;

struct rtas_event_log_v6_mainb {
#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t subsystem_id;
    uint8_t _platform1;
    uint8_t event_severity;
    uint8_t event_subtype;
    uint8_t _platform2[4];
    uint8_t _resv1[2];
    uint16_t action_flags;
    uint8_t _resv2[4];
} QEMU_PACKED;

struct rtas_event_log_v6_epow {
#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t sensor_value;
#define RTAS_LOG_V6_EPOW_ACTION_RESET 0
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7
    uint8_t event_modifier;
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4
    uint8_t extended_modifier;
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1
    uint8_t _resv;
    uint64_t reason_code;
} QEMU_PACKED;

struct epow_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;

union drc_identifier {
    uint32_t index;
    uint32_t count;
    struct {
        uint32_t count;
        uint32_t index;
    } count_indexed;
    char name[1];
} QEMU_PACKED;

struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t hotplug_type;
#define RTAS_LOG_V6_HP_TYPE_CPU 1
#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
#define RTAS_LOG_V6_HP_TYPE_SLOT 3
#define RTAS_LOG_V6_HP_TYPE_PHB 4
#define RTAS_LOG_V6_HP_TYPE_PCI 5
    uint8_t hotplug_action;
#define RTAS_LOG_V6_HP_ACTION_ADD 1
#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
    uint8_t hotplug_identifier;
#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4
    uint8_t reserved;
    union drc_identifier drc_id;
} QEMU_PACKED;

struct hp_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;

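/*
 * PAPR event classes, used to index the per-class interrupt sources below.
 * EVENT_CLASS_MASK() follows PAPR's big-endian bit numbering, so class 0
 * corresponds to the most significant bit of the 32-bit event mask the
 * guest passes to check-exception.
 */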
typedef enum EventClass {
    EVENT_CLASS_INTERNAL_ERRORS = 0,
    EVENT_CLASS_EPOW = 1,
    EVENT_CLASS_RESERVED = 2,
    EVENT_CLASS_HOT_PLUG = 3,
    EVENT_CLASS_IO = 4,
    EVENT_CLASS_MAX
} EventClassIndex;
#define EVENT_CLASS_MASK(index) (1 << (31 - index))

static const char * const event_names[EVENT_CLASS_MAX] = {
    [EVENT_CLASS_INTERNAL_ERRORS] = "internal-errors",
    [EVENT_CLASS_EPOW] = "epow-events",
    [EVENT_CLASS_HOT_PLUG] = "hot-plug-events",
    [EVENT_CLASS_IO] = "ibm,io-events",
};

struct sPAPREventSource {
    int irq;
    uint32_t mask;
    bool enabled;
};

static sPAPREventSource *spapr_event_sources_new(void)
{
    return g_new0(sPAPREventSource, EVENT_CLASS_MAX);
}

static void spapr_event_sources_register(sPAPREventSource *event_sources,
                                         EventClassIndex index, int irq)
{
    /* we only support 1 irq per event class at the moment */
    g_assert(event_sources);
    g_assert(!event_sources[index].enabled);
    event_sources[index].irq = irq;
    event_sources[index].mask = EVENT_CLASS_MASK(index);
    event_sources[index].enabled = true;
}

static const sPAPREventSource *
spapr_event_sources_get_source(sPAPREventSource *event_sources,
                               EventClassIndex index)
{
    g_assert(index < EVENT_CLASS_MAX);
    g_assert(event_sources);

    return &event_sources[index];
}

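/*
 * Populate the /event-sources node of the guest device tree: each enabled
 * event class becomes a subnode named after event_names[] with a two-cell
 * "interrupts" property, and the parent node is marked as an interrupt
 * controller.  A rough sketch of the resulting layout, with illustrative
 * interrupt numbers only:
 *
 *   event-sources {
 *       interrupt-controller;
 *       #interrupt-cells = <2>;
 *       interrupt-ranges = <...>;
 *       epow-events {
 *           interrupts = <0x1000 0>;
 *       };
 *       hot-plug-events {
 *           interrupts = <0x1001 0>;
 *       };
 *   };
 */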
void spapr_dt_events(sPAPRMachineState *spapr, void *fdt)
{
    uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
    int i, count = 0, event_sources;
    sPAPREventSource *events = spapr->event_sources;

    g_assert(events);

    _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));

    for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
        int node_offset;
        uint32_t interrupts[2];
        const sPAPREventSource *source =
            spapr_event_sources_get_source(events, i);
        const char *source_name = event_names[i];

        if (!source->enabled) {
            continue;
        }

        interrupts[0] = cpu_to_be32(source->irq);
        interrupts[1] = 0;

        _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
        _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
                         sizeof(interrupts)));

        irq_ranges[count++] = interrupts[0];
        irq_ranges[count++] = cpu_to_be32(1);
    }

    irq_ranges[count] = cpu_to_be32(count);
    count++;

    _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
    _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
    _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
                      irq_ranges, count * sizeof(uint32_t))));
}

static const sPAPREventSource *
rtas_event_log_to_source(sPAPRMachineState *spapr, int log_type)
{
    const sPAPREventSource *source;

    g_assert(spapr->event_sources);

    switch (log_type) {
    case RTAS_LOG_TYPE_HOTPLUG:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_HOT_PLUG);
        if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
            g_assert(source->enabled);
            break;
        }
        /* fall back to epow for legacy hotplug interrupt source */
    case RTAS_LOG_TYPE_EPOW:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_EPOW);
        break;
    default:
        source = NULL;
    }

    return source;
}

static int rtas_event_log_to_irq(sPAPRMachineState *spapr, int log_type)
{
    const sPAPREventSource *source;

    source = rtas_event_log_to_source(spapr, log_type);
    g_assert(source);
    g_assert(source->enabled);

    return source->irq;
}

static uint32_t spapr_event_log_entry_type(sPAPREventLogEntry *entry)
{
    return entry->summary & RTAS_LOG_TYPE_MASK;
}

static void rtas_event_log_queue(sPAPRMachineState *spapr,
                                 sPAPREventLogEntry *entry)
{
    QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
}

static sPAPREventLogEntry *rtas_event_log_dequeue(sPAPRMachineState *spapr,
                                                  uint32_t event_mask)
{
    sPAPREventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const sPAPREventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        if (source->mask & event_mask) {
            break;
        }
    }

    if (entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
    }

    return entry;
}

static bool rtas_event_log_contains(uint32_t event_mask)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const sPAPREventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        if (source->mask & event_mask) {
            return true;
        }
    }

    return false;
}

static uint32_t next_plid;

static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
{
    v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
        | RTAS_LOG_V6_B0_BIGENDIAN;
    v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
        | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
    v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}

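/*
 * Fill in the "Main-A" platform event header section: a BCD-encoded
 * creation date/time read from the machine's RTC, a creator id of 'H'
 * (hypervisor), the number of sections in the log, and a monotonically
 * increasing platform log id.
 */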
static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
                             int section_count)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct tm tm;
    int year;

    maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
    maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
    /* FIXME: section version, subtype and creator id? */
    spapr_rtc_read(&spapr->rtc, &tm, NULL);
    year = tm.tm_year + 1900;
    maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
                                       | (to_bcd(year % 100) << 16)
                                       | (to_bcd(tm.tm_mon + 1) << 8)
                                       | to_bcd(tm.tm_mday));
    maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
                                       | (to_bcd(tm.tm_min) << 16)
                                       | (to_bcd(tm.tm_sec) << 8));
    maina->creator_id = 'H'; /* Hypervisor */
    maina->section_count = section_count;
    maina->plid = next_plid++;
}

static void spapr_powerdown_req(Notifier *n, void *opaque)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_epow *epow;
    struct epow_extended_log *new_epow;

    entry = g_new(sPAPREventLogEntry, 1);
    new_epow = g_malloc0(sizeof(*new_epow));
    entry->extended_log = new_epow;

    v6hdr = &new_epow->v6hdr;
    maina = &new_epow->maina;
    mainb = &new_epow->mainb;
    epow = &new_epow->epow;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_TYPE_EPOW;
    entry->extended_length = sizeof(*new_epow);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* FIXME: section version, subtype and creator id? */
    mainb->subsystem_id = 0xa0; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0xd0; /* Normal shutdown */

    epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
    epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
    epow->hdr.section_version = 2; /* includes extended modifier */
    /* FIXME: section subtype and creator id? */
    epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
    epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
    epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
                                 rtas_event_log_to_irq(spapr,
                                                       RTAS_LOG_TYPE_EPOW)));
}

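/*
 * Build a hotplug event log (v6 header plus Main-A, Main-B and HP
 * sections), queue it, and pulse the interrupt of the hotplug event
 * source, or of the EPOW source when the guest has not negotiated the
 * dedicated hotplug event source via OV5_HP_EVT.
 */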
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
                                    sPAPRDRConnectorType drc_type,
                                    union drc_identifier *drc_id)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry;
    struct hp_extended_log *new_hp;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_hp *hp;

    entry = g_new(sPAPREventLogEntry, 1);
    new_hp = g_malloc0(sizeof(struct hp_extended_log));
    entry->extended_log = new_hp;

    v6hdr = &new_hp->v6hdr;
    maina = &new_hp->maina;
    mainb = &new_hp->mainb;
    hp = &new_hp->hp;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_INITIATOR_HOTPLUG
        | RTAS_LOG_TYPE_HOTPLUG;
    entry->extended_length = sizeof(*new_hp);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    mainb->subsystem_id = 0x80; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0x00; /* Normal shutdown */

    hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
    hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
    hp->hdr.section_version = 1; /* includes extended modifier */
    hp->hotplug_action = hp_action;
    hp->hotplug_identifier = hp_id;

    switch (drc_type) {
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
        break;
    default:
        /* we shouldn't be signaling hotplug events for resources
         * that don't support them
         */
        g_assert(false);
        return;
    }

    if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
        hp->drc_id.count = cpu_to_be32(drc_id->count);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
        hp->drc_id.index = cpu_to_be32(drc_id->index);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
        /* we should not be using count_indexed value unless the guest
         * supports dedicated hotplug event source
         */
        g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
        hp->drc_id.count_indexed.count =
            cpu_to_be32(drc_id->count_indexed.count);
        hp->drc_id.count_indexed.index =
            cpu_to_be32(drc_id->count_indexed.index);
    }

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
                                 rtas_event_log_to_irq(spapr,
                                                       RTAS_LOG_TYPE_HOTPLUG)));
}

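/*
 * Public helpers called from the hotplug/unplug paths: "by_index"
 * identifies a single DRC by its index, "by_count" requests a number of
 * resources of the given type, and "by_count_indexed" combines both and
 * is only valid when the dedicated hotplug event source is in use.
 */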
void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
{
    sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc)
{
    sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
                                    uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
                                       uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
                                            uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
                                               uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

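/*
 * RTAS check-exception call: dequeue the oldest pending event matching the
 * guest-supplied event mask, copy the fixed header plus the extended log
 * into the guest buffer, and re-pulse the interrupt of any source that
 * still has events queued.
 */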
static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            uint32_t token, uint32_t nargs,
                            target_ulong args,
                            uint32_t nret, target_ulong rets)
{
    uint32_t mask, buf, len, event_len;
    uint64_t xinfo;
    sPAPREventLogEntry *event;
    struct rtas_error_log header;
    int i;

    if ((nargs < 6) || (nargs > 7) || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    xinfo = rtas_ld(args, 1);
    mask = rtas_ld(args, 2);
    buf = rtas_ld(args, 4);
    len = rtas_ld(args, 5);
    if (nargs == 7) {
        xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
    }

    event = rtas_event_log_dequeue(spapr, mask);
    if (!event) {
        goto out_no_events;
    }

    event_len = event->extended_length + sizeof(header);

    if (event_len < len) {
        len = event_len;
    }

    header.summary = cpu_to_be32(event->summary);
    header.extended_length = cpu_to_be32(event->extended_length);
    cpu_physical_memory_write(buf, &header, sizeof(header));
    cpu_physical_memory_write(buf + sizeof(header), event->extended_log,
                              event->extended_length);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    g_free(event->extended_log);
    g_free(event);

    /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
     * there are still pending events to be fetched via check-exception. We
     * do the latter here, since our code relies on edge-triggered
     * interrupts.
     */
    for (i = 0; i < EVENT_CLASS_MAX; i++) {
        if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
            const sPAPREventSource *source =
                spapr_event_sources_get_source(spapr->event_sources, i);

            g_assert(source->enabled);
            qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), source->irq));
        }
    }

    return;

out_no_events:
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}

static void event_scan(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                       uint32_t token, uint32_t nargs,
                       target_ulong args,
                       uint32_t nret, target_ulong rets)
{
    if (nargs != 4 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}

void spapr_clear_pending_events(sPAPRMachineState *spapr)
{
    sPAPREventLogEntry *entry = NULL, *next_entry;

    QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
        g_free(entry->extended_log);
        g_free(entry);
    }
}

void spapr_events_init(sPAPRMachineState *spapr)
{
    QTAILQ_INIT(&spapr->pending_events);

    spapr->event_sources = spapr_event_sources_new();

    spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
                                 spapr_ics_alloc(spapr->ics, 0, false,
                                                 &error_fatal));

    /* NOTE: if machine supports modern/dedicated hotplug event source,
     * we add it to the device-tree unconditionally. This means we may
     * have cases where the source is enabled in QEMU, but unused by the
     * guest because it does not support modern hotplug events, so we
     * take care to rely on checking for negotiation of OV5_HP_EVT option
     * before attempting to use it to signal events, rather than simply
     * checking that it's enabled.
     */
    if (spapr->use_hotplug_event_source) {
        spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
                                     spapr_ics_alloc(spapr->ics, 0, false,
                                                     &error_fatal));
    }

    spapr->epow_notifier.notify = spapr_powerdown_req;
    qemu_register_powerdown_notifier(&spapr->epow_notifier);
    spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
                        check_exception);
    spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
}