/* Support for generating ACPI tables and passing them to Guests
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "acpi-build.h"
#include <stddef.h>
#include <glib.h>
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "qom/cpu.h"
#include "hw/i386/pc.h"
#include "target-i386/cpu.h"
#include "hw/timer/hpet.h"
#include "hw/i386/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/isa/isa.h"
#include "hw/acpi/memory_hotplug.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"

/* Supported chipsets: */
#include "hw/acpi/piix4.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/ich9.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/q35.h"
#include "hw/i386/intel_iommu.h"

#include "hw/i386/q35-acpi-dsdt.hex"
#include "hw/i386/acpi-dsdt.hex"

#include "qapi/qmp/qint.h"
#include "qom/qom-qobject.h"

/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
 * -M pc-i440fx-2.0.  Even if the actual amount of AML generated grows
 * a little bit, there should be plenty of free space since the DSDT
 * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
 */
#define ACPI_BUILD_LEGACY_CPU_AML_SIZE 97
#define ACPI_BUILD_ALIGN_SIZE 0x1000

#define ACPI_BUILD_TABLE_SIZE 0x20000

typedef struct AcpiCpuInfo {
    DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
} AcpiCpuInfo;

typedef struct AcpiMcfgInfo {
    uint64_t mcfg_base;
    uint32_t mcfg_size;
} AcpiMcfgInfo;

typedef struct AcpiPmInfo {
    bool s3_disabled;
    bool s4_disabled;
    bool pcihp_bridge_en;
    uint8_t s4_val;
    uint16_t sci_int;
    uint8_t acpi_enable_cmd;
    uint8_t acpi_disable_cmd;
    uint32_t gpe0_blk;
    uint32_t gpe0_blk_len;
    uint32_t io_base;
} AcpiPmInfo;

typedef struct AcpiMiscInfo {
    bool has_hpet;
    bool has_tpm;
    DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
    const unsigned char *dsdt_code;
    unsigned dsdt_size;
    uint16_t pvpanic_port;
} AcpiMiscInfo;

typedef struct AcpiBuildPciBusHotplugState {
    GArray *device_table;
    GArray *notify_table;
    struct AcpiBuildPciBusHotplugState *parent;
    bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;

static void acpi_get_dsdt(AcpiMiscInfo *info)
{
    uint16_t *applesmc_sta;
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    assert(!!piix != !!lpc);

    if (piix) {
        info->dsdt_code = AcpiDsdtAmlCode;
        info->dsdt_size = sizeof AcpiDsdtAmlCode;
        applesmc_sta = piix_dsdt_applesmc_sta;
    }
    if (lpc) {
        info->dsdt_code = Q35AcpiDsdtAmlCode;
        info->dsdt_size = sizeof Q35AcpiDsdtAmlCode;
        applesmc_sta = q35_dsdt_applesmc_sta;
    }

    /* Patch in appropriate value for AppleSMC _STA */
    *(uint8_t *)(info->dsdt_code + *applesmc_sta) =
        applesmc_find() ? 0x0b : 0x00;
}

static
int acpi_add_cpu_info(Object *o, void *opaque)
{
    AcpiCpuInfo *cpu = opaque;
    uint64_t apic_id;

    if (object_dynamic_cast(o, TYPE_CPU)) {
        apic_id = object_property_get_int(o, "apic-id", NULL);
        assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

        set_bit(apic_id, cpu->found_cpus);
    }

    object_child_foreach(o, acpi_add_cpu_info, opaque);
    return 0;
}

static void acpi_get_cpu_info(AcpiCpuInfo *cpu)
{
    Object *root = object_get_root();

    memset(cpu->found_cpus, 0, sizeof cpu->found_cpus);
    object_child_foreach(root, acpi_add_cpu_info, cpu);
}

static void acpi_get_pm_info(AcpiPmInfo *pm)
{
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    Object *obj = NULL;
    QObject *o;

    if (piix) {
        obj = piix;
    }
    if (lpc) {
        obj = lpc;
    }
    assert(obj);

    /* Fill in optional s3/s4 related properties */
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
    if (o) {
        pm->s3_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s3_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL);
    if (o) {
        pm->s4_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL);
    if (o) {
        pm->s4_val = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_val = false;
    }
    qobject_decref(o);

    /* Fill in mandatory properties */
    pm->sci_int = object_property_get_int(obj, ACPI_PM_PROP_SCI_INT, NULL);

    pm->acpi_enable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_ENABLE_CMD,
                                                  NULL);
    pm->acpi_disable_cmd =
        object_property_get_int(obj,
                                ACPI_PM_PROP_ACPI_DISABLE_CMD,
                                NULL);
    pm->io_base = object_property_get_int(obj, ACPI_PM_PROP_PM_IO_BASE,
                                          NULL);
    pm->gpe0_blk = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK,
                                           NULL);
    pm->gpe0_blk_len = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
                                               NULL);
    pm->pcihp_bridge_en =
        object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support",
                                 NULL);
}

static void acpi_get_misc_info(AcpiMiscInfo *info)
{
    info->has_hpet = hpet_find();
    info->has_tpm = tpm_find();
    info->pvpanic_port = pvpanic_port();
}

static void acpi_get_pci_info(PcPciInfo *info)
{
    Object *pci_host;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    info->w32.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE_START,
                                              NULL);
    info->w32.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE_END,
                                            NULL);
    info->w64.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE64_START,
                                              NULL);
    info->w64.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE64_END,
                                            NULL);
}

#define ACPI_BUILD_APPNAME  "Bochs"
#define ACPI_BUILD_APPNAME6 "BOCHS "
#define ACPI_BUILD_APPNAME4 "BXPC"

#define ACPI_BUILD_DPRINTF(level, fmt, ...) do {} while (0)

#define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
#define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
#define ACPI_BUILD_TPMLOG_FILE "etc/tpm/log"

static void
build_header(GArray *linker, GArray *table_data,
             AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
{
    memcpy(&h->signature, sig, 4);
    h->length = cpu_to_le32(len);
    h->revision = rev;
    memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
    memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
    memcpy(h->oem_table_id + 4, sig, 4);
    h->oem_revision = cpu_to_le32(1);
    memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
    h->asl_compiler_revision = cpu_to_le32(1);
    h->checksum = 0;
    /* Checksum to be filled in by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE,
                                    table_data->data, h, len, &h->checksum);
}

static inline GArray *build_alloc_array(void)
{
    return g_array_new(false, true /* clear */, 1);
}

static inline void build_free_array(GArray *array)
{
    g_array_free(array, true);
}

static inline void build_prepend_byte(GArray *array, uint8_t val)
{
    g_array_prepend_val(array, val);
}

static inline void build_append_byte(GArray *array, uint8_t val)
{
    g_array_append_val(array, val);
}

static inline void build_append_array(GArray *array, GArray *val)
{
    g_array_append_vals(array, val->data, val->len);
}

static void GCC_FMT_ATTR(2, 3)
build_append_nameseg(GArray *array, const char *format, ...)
{
    /* It would be nicer to use g_string_vprintf but it's only there in 2.22 */
    char s[] = "XXXX";
    int len;
    va_list args;

    va_start(args, format);
    len = vsnprintf(s, sizeof s, format, args);
    va_end(args);

    assert(len == 4);
    g_array_append_vals(array, s, len);
}

/* 5.4 Definition Block Encoding */
enum {
    PACKAGE_LENGTH_1BYTE_SHIFT = 6, /* Up to 63 - use extra 2 bits. */
    PACKAGE_LENGTH_2BYTE_SHIFT = 4,
    PACKAGE_LENGTH_3BYTE_SHIFT = 12,
    PACKAGE_LENGTH_4BYTE_SHIFT = 20,
};

static void build_prepend_package_length(GArray *package, unsigned min_bytes)
{
    uint8_t byte;
    unsigned length = package->len;
    unsigned length_bytes;

    if (length + 1 < (1 << PACKAGE_LENGTH_1BYTE_SHIFT)) {
        length_bytes = 1;
    } else if (length + 2 < (1 << PACKAGE_LENGTH_3BYTE_SHIFT)) {
        length_bytes = 2;
    } else if (length + 3 < (1 << PACKAGE_LENGTH_4BYTE_SHIFT)) {
        length_bytes = 3;
    } else {
        length_bytes = 4;
    }

    /* Force length to at least min_bytes.
     * This wastes memory but that's how the BIOS did it.
     */
    length_bytes = MAX(length_bytes, min_bytes);

    /* PkgLength is inclusive: it also counts the PkgLength bytes themselves. */
    length += length_bytes;

    switch (length_bytes) {
    case 1:
        byte = length;
        build_prepend_byte(package, byte);
        return;
    case 4:
        byte = length >> PACKAGE_LENGTH_4BYTE_SHIFT;
        build_prepend_byte(package, byte);
        length &= (1 << PACKAGE_LENGTH_4BYTE_SHIFT) - 1;
        /* fall through */
    case 3:
        byte = length >> PACKAGE_LENGTH_3BYTE_SHIFT;
        build_prepend_byte(package, byte);
        length &= (1 << PACKAGE_LENGTH_3BYTE_SHIFT) - 1;
        /* fall through */
    case 2:
        byte = length >> PACKAGE_LENGTH_2BYTE_SHIFT;
        build_prepend_byte(package, byte);
        length &= (1 << PACKAGE_LENGTH_2BYTE_SHIFT) - 1;
        /* fall through */
    }
    /*
     * Most significant two bits of byte zero indicate how many following bytes
     * are in PkgLength encoding.
     */
    byte = ((length_bytes - 1) << PACKAGE_LENGTH_1BYTE_SHIFT) | length;
    build_prepend_byte(package, byte);
}
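
/*
 * Editor's note -- a worked example of the PkgLength encoding above, not part
 * of the original code: a package with a 0x100-byte body needs a 2-byte
 * PkgLength.  The encoded value covers the PkgLength bytes themselves, so it
 * is 0x102; byte 0 carries "number of extra bytes" in bits 7:6 plus the low
 * nibble of the value, and the following byte holds the remaining bits:
 *
 *     0x42 0x10 <0x100 bytes of body>
 *
 * where 0x42 = (1 << PACKAGE_LENGTH_1BYTE_SHIFT) | (0x102 & 0xf) and
 * 0x10 = 0x102 >> PACKAGE_LENGTH_2BYTE_SHIFT.
 */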

static void build_package(GArray *package, uint8_t op, unsigned min_bytes)
{
    build_prepend_package_length(package, min_bytes);
    build_prepend_byte(package, op);
}

static void build_extop_package(GArray *package, uint8_t op)
{
    build_package(package, op, 1);
    build_prepend_byte(package, 0x5B); /* ExtOpPrefix */
}

static void build_append_value(GArray *table, uint32_t value, int size)
{
    uint8_t prefix;
    int i;

    switch (size) {
    case 1:
        prefix = 0x0A; /* BytePrefix */
        break;
    case 2:
        prefix = 0x0B; /* WordPrefix */
        break;
    case 4:
        prefix = 0x0C; /* DWordPrefix */
        break;
    default:
        assert(0);
        return;
    }
    build_append_byte(table, prefix);
    for (i = 0; i < size; ++i) {
        build_append_byte(table, value & 0xFF);
        value = value >> 8;
    }
}

static void build_append_int(GArray *table, uint32_t value)
{
    if (value == 0x00) {
        build_append_byte(table, 0x00); /* ZeroOp */
    } else if (value == 0x01) {
        build_append_byte(table, 0x01); /* OneOp */
    } else if (value <= 0xFF) {
        build_append_value(table, value, 1);
    } else if (value <= 0xFFFF) {
        build_append_value(table, value, 2);
    } else {
        build_append_value(table, value, 4);
    }
}

static GArray *build_alloc_method(const char *name, uint8_t arg_count)
{
    GArray *method = build_alloc_array();

    build_append_nameseg(method, "%s", name);
    build_append_byte(method, arg_count); /* MethodFlags: ArgCount */

    return method;
}

static void build_append_and_cleanup_method(GArray *device, GArray *method)
{
    uint8_t op = 0x14; /* MethodOp */

    build_package(method, op, 0);

    build_append_array(device, method);
    build_free_array(method);
}

/* Append an "If (LEqual(Arg0, <value>)) { Notify(<target_name>, Arg1) }"
 * fragment to method.
 */
static void build_append_notify_target_ifequal(GArray *method,
                                               GArray *target_name,
                                               uint32_t value, int size)
{
    GArray *notify = build_alloc_array();
    uint8_t op = 0xA0; /* IfOp */

    build_append_byte(notify, 0x93); /* LEqualOp */
    build_append_byte(notify, 0x68); /* Arg0Op */
    build_append_value(notify, value, size);
    build_append_byte(notify, 0x86); /* NotifyOp */
    build_append_array(notify, target_name);
    build_append_byte(notify, 0x69); /* Arg1Op */

    /* Pack it up */
    build_package(notify, op, 1);

    build_append_array(method, notify);

    build_free_array(notify);
}

/* End here */
#define ACPI_PORT_SMI_CMD 0x00b2 /* TODO: this is APM_CNT_IOPORT */

static inline void *acpi_data_push(GArray *table_data, unsigned size)
{
    unsigned off = table_data->len;
    g_array_set_size(table_data, off + size);
    return table_data->data + off;
}

static unsigned acpi_data_len(GArray *table)
{
#if GLIB_CHECK_VERSION(2, 22, 0)
    assert(g_array_get_element_size(table) == 1);
#endif
    return table->len;
}

static void acpi_align_size(GArray *blob, unsigned align)
{
    /* Align size to multiple of given size. This reduces the chance
     * we need to change size in the future (breaking cross version migration).
     */
    g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
}

/* Set a value within table in a safe manner */
#define ACPI_BUILD_SET_LE(table, size, off, bits, val) \
    do { \
        uint64_t ACPI_BUILD_SET_LE_val = cpu_to_le64(val); \
        memcpy(acpi_data_get_ptr(table, size, off, \
                                 (bits) / BITS_PER_BYTE), \
               &ACPI_BUILD_SET_LE_val, \
               (bits) / BITS_PER_BYTE); \
    } while (0)

static inline void *acpi_data_get_ptr(uint8_t *table_data, unsigned table_size,
                                      unsigned off, unsigned size)
{
    assert(off + size > off);
    assert(off + size <= table_size);
    return table_data + off;
}

static inline void acpi_add_table(GArray *table_offsets, GArray *table_data)
{
    uint32_t offset = cpu_to_le32(table_data->len);
    g_array_append_val(table_offsets, offset);
}

/* FACS */
static void
build_facs(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
    AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs);
    memcpy(&facs->signature, "FACS", 4);
    facs->length = cpu_to_le32(sizeof(*facs));
}

/* Load chipset information in FADT */
static void fadt_setup(AcpiFadtDescriptorRev1 *fadt, AcpiPmInfo *pm)
{
    fadt->model = 1;
    fadt->reserved1 = 0;
    fadt->sci_int = cpu_to_le16(pm->sci_int);
    fadt->smi_cmd = cpu_to_le32(ACPI_PORT_SMI_CMD);
    fadt->acpi_enable = pm->acpi_enable_cmd;
    fadt->acpi_disable = pm->acpi_disable_cmd;
    /* EVT, CNT, TMR offset matches hw/acpi/core.c */
    fadt->pm1a_evt_blk = cpu_to_le32(pm->io_base);
    fadt->pm1a_cnt_blk = cpu_to_le32(pm->io_base + 0x04);
    fadt->pm_tmr_blk = cpu_to_le32(pm->io_base + 0x08);
    fadt->gpe0_blk = cpu_to_le32(pm->gpe0_blk);
    /* EVT, CNT, TMR length matches hw/acpi/core.c */
    fadt->pm1_evt_len = 4;
    fadt->pm1_cnt_len = 2;
    fadt->pm_tmr_len = 4;
    fadt->gpe0_blk_len = pm->gpe0_blk_len;
    fadt->plvl2_lat = cpu_to_le16(0xfff); /* C2 state not supported */
    fadt->plvl3_lat = cpu_to_le16(0xfff); /* C3 state not supported */
    fadt->flags = cpu_to_le32((1 << ACPI_FADT_F_WBINVD) |
                              (1 << ACPI_FADT_F_PROC_C1) |
                              (1 << ACPI_FADT_F_SLP_BUTTON) |
                              (1 << ACPI_FADT_F_RTC_S4));
    fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_USE_PLATFORM_CLOCK);
    /* APIC destination mode ("Flat Logical") has an upper limit of 8 CPUs.
     * For more than 8 CPUs, "Clustered Logical" mode has to be used.
     */
    if (max_cpus > 8) {
        fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL);
    }
}


/* FADT */
static void
build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
           unsigned facs, unsigned dsdt)
{
    AcpiFadtDescriptorRev1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    fadt->firmware_ctrl = cpu_to_le32(facs);
    /* FACS address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->firmware_ctrl,
                                   sizeof fadt->firmware_ctrl);

    fadt->dsdt = cpu_to_le32(dsdt);
    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->dsdt,
                                   sizeof fadt->dsdt);

    fadt_setup(fadt, pm);

    build_header(linker, table_data,
                 (void *)fadt, "FACP", sizeof(*fadt), 1);
}

static void
build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu,
           PcGuestInfo *guest_info)
{
    int madt_start = table_data->len;

    AcpiMultipleApicTable *madt;
    AcpiMadtIoApic *io_apic;
    AcpiMadtIntsrcovr *intsrcovr;
    AcpiMadtLocalNmi *local_nmi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);
    madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
    madt->flags = cpu_to_le32(1);

    for (i = 0; i < guest_info->apic_id_limit; i++) {
        AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
        apic->type = ACPI_APIC_PROCESSOR;
        apic->length = sizeof(*apic);
        apic->processor_id = i;
        apic->local_apic_id = i;
        if (test_bit(i, cpu->found_cpus)) {
            apic->flags = cpu_to_le32(1);
        } else {
            apic->flags = cpu_to_le32(0);
        }
    }
    io_apic = acpi_data_push(table_data, sizeof *io_apic);
    io_apic->type = ACPI_APIC_IO;
    io_apic->length = sizeof(*io_apic);
#define ACPI_BUILD_IOAPIC_ID 0x0
    io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID;
    io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS);
    io_apic->interrupt = cpu_to_le32(0);

    if (guest_info->apic_xrupt_override) {
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = 0;
        intsrcovr->gsi = cpu_to_le32(2);
        intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */
    }
    for (i = 1; i < 16; i++) {
#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
        if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) {
            /* No need for an INT source override structure. */
            continue;
        }
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = i;
        intsrcovr->gsi = cpu_to_le32(i);
        intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */
    }

    local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
    local_nmi->type = ACPI_APIC_LOCAL_NMI;
    local_nmi->length = sizeof(*local_nmi);
    local_nmi->processor_id = 0xff; /* all processors */
    local_nmi->flags = cpu_to_le16(0);
    local_nmi->lint = 1; /* ACPI_LINT1 */

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 1);
}

/* Encode a hex value */
static inline char acpi_get_hex(uint32_t val)
{
    val &= 0x0f;
    return (val <= 9) ? ('0' + val) : ('A' + val - 10);
}

#include "hw/i386/ssdt-proc.hex"

/* 0x5B 0x83 ProcessorOp PkgLength NameString ProcID */
#define ACPI_PROC_OFFSET_CPUHEX (*ssdt_proc_name - *ssdt_proc_start + 2)
#define ACPI_PROC_OFFSET_CPUID1 (*ssdt_proc_name - *ssdt_proc_start + 4)
#define ACPI_PROC_OFFSET_CPUID2 (*ssdt_proc_id - *ssdt_proc_start)
#define ACPI_PROC_SIZEOF (*ssdt_proc_end - *ssdt_proc_start)
#define ACPI_PROC_AML (ssdp_proc_aml + *ssdt_proc_start)

/* 0x5B 0x82 DeviceOp PkgLength NameString */
#define ACPI_PCIHP_OFFSET_HEX (*ssdt_pcihp_name - *ssdt_pcihp_start + 1)
#define ACPI_PCIHP_OFFSET_ID (*ssdt_pcihp_id - *ssdt_pcihp_start)
#define ACPI_PCIHP_OFFSET_ADR (*ssdt_pcihp_adr - *ssdt_pcihp_start)
#define ACPI_PCIHP_OFFSET_EJ0 (*ssdt_pcihp_ej0 - *ssdt_pcihp_start)
#define ACPI_PCIHP_SIZEOF (*ssdt_pcihp_end - *ssdt_pcihp_start)
#define ACPI_PCIHP_AML (ssdp_pcihp_aml + *ssdt_pcihp_start)

#define ACPI_PCINOHP_OFFSET_HEX (*ssdt_pcinohp_name - *ssdt_pcinohp_start + 1)
#define ACPI_PCINOHP_OFFSET_ADR (*ssdt_pcinohp_adr - *ssdt_pcinohp_start)
#define ACPI_PCINOHP_SIZEOF (*ssdt_pcinohp_end - *ssdt_pcinohp_start)
#define ACPI_PCINOHP_AML (ssdp_pcihp_aml + *ssdt_pcinohp_start)

#define ACPI_PCIVGA_OFFSET_HEX (*ssdt_pcivga_name - *ssdt_pcivga_start + 1)
#define ACPI_PCIVGA_OFFSET_ADR (*ssdt_pcivga_adr - *ssdt_pcivga_start)
#define ACPI_PCIVGA_SIZEOF (*ssdt_pcivga_end - *ssdt_pcivga_start)
#define ACPI_PCIVGA_AML (ssdp_pcihp_aml + *ssdt_pcivga_start)

#define ACPI_PCIQXL_OFFSET_HEX (*ssdt_pciqxl_name - *ssdt_pciqxl_start + 1)
#define ACPI_PCIQXL_OFFSET_ADR (*ssdt_pciqxl_adr - *ssdt_pciqxl_start)
#define ACPI_PCIQXL_SIZEOF (*ssdt_pciqxl_end - *ssdt_pciqxl_start)
#define ACPI_PCIQXL_AML (ssdp_pcihp_aml + *ssdt_pciqxl_start)

#include "hw/i386/ssdt-mem.hex"

/* 0x5B 0x82 DeviceOp PkgLength NameString DimmID */
#define ACPI_MEM_OFFSET_HEX (*ssdt_mem_name - *ssdt_mem_start + 2)
#define ACPI_MEM_OFFSET_ID (*ssdt_mem_id - *ssdt_mem_start + 7)
#define ACPI_MEM_SIZEOF (*ssdt_mem_end - *ssdt_mem_start)
#define ACPI_MEM_AML (ssdm_mem_aml + *ssdt_mem_start)

#define ACPI_SSDT_SIGNATURE 0x54445353 /* SSDT */
#define ACPI_SSDT_HEADER_LENGTH 36

#include "hw/i386/ssdt-misc.hex"
#include "hw/i386/ssdt-pcihp.hex"
#include "hw/i386/ssdt-tpm.hex"

static void
build_append_notify_method(GArray *device, const char *name,
                           const char *format, int count)
{
    int i;
    GArray *method = build_alloc_method(name, 2);

    for (i = 0; i < count; i++) {
        GArray *target = build_alloc_array();
        build_append_nameseg(target, format, i);
        assert(i < 256); /* Fits in 1 byte */
        build_append_notify_target_ifequal(method, target, i, 1);
        build_free_array(target);
    }

    build_append_and_cleanup_method(device, method);
}

static void patch_pcihp(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIHP_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIHP_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIHP_OFFSET_ID] = slot;
    ssdt_ptr[ACPI_PCIHP_OFFSET_ADR + 2] = slot;
}

static void patch_pcinohp(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCINOHP_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCINOHP_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCINOHP_OFFSET_ADR + 2] = slot;
}

static void patch_pcivga(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIVGA_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIVGA_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIVGA_OFFSET_ADR + 2] = slot;
}

static void patch_pciqxl(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIQXL_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIQXL_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIQXL_OFFSET_ADR + 2] = slot;
}

/* Assign BSEL property to all buses. In the future, this can be changed
 * to only assign to buses that support hotplug.
 */
static void *acpi_set_bsel(PCIBus *bus, void *opaque)
{
    unsigned *bsel_alloc = opaque;
    unsigned *bus_bsel;

    if (qbus_is_hotpluggable(BUS(bus))) {
        bus_bsel = g_malloc(sizeof *bus_bsel);

        *bus_bsel = (*bsel_alloc)++;
        object_property_add_uint32_ptr(OBJECT(bus), ACPI_PCIHP_PROP_BSEL,
                                       bus_bsel, NULL);
    }

    return bsel_alloc;
}

static void acpi_set_pci_info(void)
{
    PCIBus *bus = find_i440fx(); /* TODO: Q35 support */
    unsigned bsel_alloc = 0;

    if (bus) {
        /* Scan all PCI buses. Set property to enable acpi based hotplug. */
        pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &bsel_alloc);
    }
}

static void build_pci_bus_state_init(AcpiBuildPciBusHotplugState *state,
                                     AcpiBuildPciBusHotplugState *parent,
                                     bool pcihp_bridge_en)
{
    state->parent = parent;
    state->device_table = build_alloc_array();
    state->notify_table = build_alloc_array();
    state->pcihp_bridge_en = pcihp_bridge_en;
}

static void build_pci_bus_state_cleanup(AcpiBuildPciBusHotplugState *state)
{
    build_free_array(state->device_table);
    build_free_array(state->notify_table);
}

static void *build_pci_bus_begin(PCIBus *bus, void *parent_state)
{
    AcpiBuildPciBusHotplugState *parent = parent_state;
    AcpiBuildPciBusHotplugState *child = g_malloc(sizeof *child);

    build_pci_bus_state_init(child, parent, parent->pcihp_bridge_en);

    return child;
}

static void build_pci_bus_end(PCIBus *bus, void *bus_state)
{
    AcpiBuildPciBusHotplugState *child = bus_state;
    AcpiBuildPciBusHotplugState *parent = child->parent;
    GArray *bus_table = build_alloc_array();
    DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_present, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_system, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_vga, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_qxl, PCI_SLOT_MAX);
    uint8_t op;
    int i;
    QObject *bsel;
    GArray *method;
    bool bus_hotplug_support = false;

    /*
     * Skip bridge subtree creation if bridge hotplug is disabled
     * to make acpi tables compatible with legacy machine types.
     */
    if (!child->pcihp_bridge_en && bus->parent_dev) {
        return;
    }

    if (bus->parent_dev) {
        op = 0x82; /* DeviceOp */
        build_append_nameseg(bus_table, "S%.02X_",
                             bus->parent_dev->devfn);
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_nameseg(bus_table, "_SUN");
        build_append_value(bus_table, PCI_SLOT(bus->parent_dev->devfn), 1);
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_nameseg(bus_table, "_ADR");
        build_append_value(bus_table, (PCI_SLOT(bus->parent_dev->devfn) << 16) |
                           PCI_FUNC(bus->parent_dev->devfn), 4);
    } else {
        op = 0x10; /* ScopeOp */
        build_append_nameseg(bus_table, "PCI0");
    }

    bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
    if (bsel) {
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_nameseg(bus_table, "BSEL");
        build_append_int(bus_table, qint_get_int(qobject_to_qint(bsel)));
        memset(slot_hotplug_enable, 0xff, sizeof slot_hotplug_enable);
    } else {
        /* No bsel - no slots are hot-pluggable */
        memset(slot_hotplug_enable, 0x00, sizeof slot_hotplug_enable);
    }

    memset(slot_device_present, 0x00, sizeof slot_device_present);
    memset(slot_device_system, 0x00, sizeof slot_device_system);
    memset(slot_device_vga, 0x00, sizeof slot_device_vga);
    memset(slot_device_qxl, 0x00, sizeof slot_device_qxl);

    for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) {
        DeviceClass *dc;
        PCIDeviceClass *pc;
        PCIDevice *pdev = bus->devices[i];
        int slot = PCI_SLOT(i);
        bool bridge_in_acpi;

        if (!pdev) {
            continue;
        }

        set_bit(slot, slot_device_present);
        pc = PCI_DEVICE_GET_CLASS(pdev);
        dc = DEVICE_GET_CLASS(pdev);

        /* When hotplug for bridges is enabled, bridges are
         * described in ACPI separately (see build_pci_bus_end).
         * In this case they aren't themselves hot-pluggable.
         */
        bridge_in_acpi = pc->is_bridge && child->pcihp_bridge_en;

        if (pc->class_id == PCI_CLASS_BRIDGE_ISA || bridge_in_acpi) {
            set_bit(slot, slot_device_system);
        }

        if (pc->class_id == PCI_CLASS_DISPLAY_VGA) {
            set_bit(slot, slot_device_vga);

            if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
                set_bit(slot, slot_device_qxl);
            }
        }

        if (!dc->hotpluggable || bridge_in_acpi) {
            clear_bit(slot, slot_hotplug_enable);
        }
    }

    /* Append Device object for each slot */
    for (i = 0; i < PCI_SLOT_MAX; i++) {
        bool can_eject = test_bit(i, slot_hotplug_enable);
        bool present = test_bit(i, slot_device_present);
        bool vga = test_bit(i, slot_device_vga);
        bool qxl = test_bit(i, slot_device_qxl);
        bool system = test_bit(i, slot_device_system);
        if (can_eject) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIHP_SIZEOF);
            memcpy(pcihp, ACPI_PCIHP_AML, ACPI_PCIHP_SIZEOF);
            patch_pcihp(i, pcihp);
            bus_hotplug_support = true;
        } else if (qxl) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIQXL_SIZEOF);
            memcpy(pcihp, ACPI_PCIQXL_AML, ACPI_PCIQXL_SIZEOF);
            patch_pciqxl(i, pcihp);
        } else if (vga) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIVGA_SIZEOF);
            memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF);
            patch_pcivga(i, pcihp);
        } else if (system) {
            /* Nothing to do: system devices are in DSDT or in SSDT above. */
        } else if (present) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCINOHP_SIZEOF);
            memcpy(pcihp, ACPI_PCINOHP_AML, ACPI_PCINOHP_SIZEOF);
            patch_pcinohp(i, pcihp);
        }
    }

    if (bsel) {
        method = build_alloc_method("DVNT", 2);

        for (i = 0; i < PCI_SLOT_MAX; i++) {
            GArray *notify;
            uint8_t op;

            if (!test_bit(i, slot_hotplug_enable)) {
                continue;
            }

            notify = build_alloc_array();
            op = 0xA0; /* IfOp */

            build_append_byte(notify, 0x7B); /* AndOp */
            build_append_byte(notify, 0x68); /* Arg0Op */
            build_append_int(notify, 0x1U << i);
            build_append_byte(notify, 0x00); /* NullName */
            build_append_byte(notify, 0x86); /* NotifyOp */
            build_append_nameseg(notify, "S%.02X_", PCI_DEVFN(i, 0));
            build_append_byte(notify, 0x69); /* Arg1Op */

            /* Pack it up */
            build_package(notify, op, 0);

            build_append_array(method, notify);

            build_free_array(notify);
        }

        build_append_and_cleanup_method(bus_table, method);
    }

    /* Append PCNT method to notify about events on local and child buses.
     * Add unconditionally for root since DSDT expects it.
     */
    if (bus_hotplug_support || child->notify_table->len || !bus->parent_dev) {
        method = build_alloc_method("PCNT", 0);

        /* If bus supports hotplug select it and notify about local events */
        if (bsel) {
            build_append_byte(method, 0x70); /* StoreOp */
            build_append_int(method, qint_get_int(qobject_to_qint(bsel)));
            build_append_nameseg(method, "BNUM");
            build_append_nameseg(method, "DVNT");
            build_append_nameseg(method, "PCIU");
            build_append_int(method, 1); /* Device Check */
            build_append_nameseg(method, "DVNT");
            build_append_nameseg(method, "PCID");
            build_append_int(method, 3); /* Eject Request */
        }

        /* Notify about child bus events in any case */
        build_append_array(method, child->notify_table);

        build_append_and_cleanup_method(bus_table, method);

        /* Append description of child buses */
        build_append_array(bus_table, child->device_table);

        /* Pack it up */
        if (bus->parent_dev) {
            build_extop_package(bus_table, op);
        } else {
            build_package(bus_table, op, 0);
        }

        /* Append our bus description to parent table */
        build_append_array(parent->device_table, bus_table);

        /* Also tell parent how to notify us, invoking PCNT method.
         * At the moment this is not needed for root as we have a single root.
         */
        if (bus->parent_dev) {
            build_append_byte(parent->notify_table, '^'); /* ParentPrefixChar */
            build_append_byte(parent->notify_table, 0x2E); /* DualNamePrefix */
            build_append_nameseg(parent->notify_table, "S%.02X_",
                                 bus->parent_dev->devfn);
            build_append_nameseg(parent->notify_table, "PCNT");
        }
    }

    qobject_decref(bsel);
    build_free_array(bus_table);
    build_pci_bus_state_cleanup(child);
    g_free(child);
}

static void patch_pci_windows(PcPciInfo *pci, uint8_t *start, unsigned size)
{
    ACPI_BUILD_SET_LE(start, size, acpi_pci32_start[0], 32, pci->w32.begin);

    ACPI_BUILD_SET_LE(start, size, acpi_pci32_end[0], 32, pci->w32.end - 1);

    if (pci->w64.end || pci->w64.begin) {
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_valid[0], 8, 1);
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_start[0], 64, pci->w64.begin);
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_end[0], 64, pci->w64.end - 1);
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_length[0], 64,
                          pci->w64.end - pci->w64.begin);
    } else {
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_valid[0], 8, 0);
    }
}

static void
build_ssdt(GArray *table_data, GArray *linker,
           AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
           PcPciInfo *pci, PcGuestInfo *guest_info)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    uint32_t nr_mem = machine->ram_slots;
    unsigned acpi_cpus = guest_info->apic_id_limit;
    int ssdt_start = table_data->len;
    uint8_t *ssdt_ptr;
    int i;

    /* The current AML generator can cover the APIC ID range [0..255],
     * inclusive, for VCPU hotplug.
     */
    QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
    g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);

    /* Copy header and patch values in the S3_ / S4_ / S5_ packages */
    ssdt_ptr = acpi_data_push(table_data, sizeof(ssdp_misc_aml));
    memcpy(ssdt_ptr, ssdp_misc_aml, sizeof(ssdp_misc_aml));
    if (pm->s3_disabled) {
        ssdt_ptr[acpi_s3_name[0]] = 'X';
    }
    if (pm->s4_disabled) {
        ssdt_ptr[acpi_s4_name[0]] = 'X';
    } else {
        ssdt_ptr[acpi_s4_pkg[0] + 1] = ssdt_ptr[acpi_s4_pkg[0] + 3] =
            pm->s4_val;
    }

    patch_pci_windows(pci, ssdt_ptr, sizeof(ssdp_misc_aml));

    ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
                      ssdt_isa_pest[0], 16, misc->pvpanic_port);

    ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
                      ssdt_mctrl_nr_slots[0], 32, nr_mem);

    {
        GArray *sb_scope = build_alloc_array();
        uint8_t op = 0x10; /* ScopeOp */

        build_append_nameseg(sb_scope, "_SB_");

        /* build Processor object for each processor */
        for (i = 0; i < acpi_cpus; i++) {
            uint8_t *proc = acpi_data_push(sb_scope, ACPI_PROC_SIZEOF);
            memcpy(proc, ACPI_PROC_AML, ACPI_PROC_SIZEOF);
            proc[ACPI_PROC_OFFSET_CPUHEX] = acpi_get_hex(i >> 4);
            proc[ACPI_PROC_OFFSET_CPUHEX+1] = acpi_get_hex(i);
            proc[ACPI_PROC_OFFSET_CPUID1] = i;
            proc[ACPI_PROC_OFFSET_CPUID2] = i;
        }

        /* build this code:
         *   Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
         */
        /* Arg0 = Processor ID = APIC ID */
        build_append_notify_method(sb_scope, "NTFY", "CP%0.02X", acpi_cpus);

        /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })" */
        build_append_byte(sb_scope, 0x08); /* NameOp */
        build_append_nameseg(sb_scope, "CPON");

        {
            GArray *package = build_alloc_array();
            uint8_t op;

            /*
             * Note: The ability to create variable-sized packages was first
             * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size
             * packages with up to 255 elements.
             * Windows guests up to win2k8 fail when VarPackageOp is used.
             */
            if (acpi_cpus <= 255) {
                op = 0x12; /* PackageOp */
                build_append_byte(package, acpi_cpus); /* NumElements */
            } else {
                op = 0x13; /* VarPackageOp */
                build_append_int(package, acpi_cpus); /* VarNumElements */
            }

            for (i = 0; i < acpi_cpus; i++) {
                uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
                build_append_byte(package, b);
            }

            build_package(package, op, 2);
            build_append_array(sb_scope, package);
            build_free_array(package);
        }
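
        /*
         * Editor's sketch (not from the original source) of the ASL the
         * fragment above is meant to produce, assuming two possible VCPUs
         * with only CPU 0 present:
         *
         *     Processor(CP00, 0x00, ...) { ... }   (from ssdt-proc.hex)
         *     Processor(CP01, 0x01, ...) { ... }
         *     Method(NTFY, 2) {
         *         If (LEqual(Arg0, 0x00)) { Notify(CP00, Arg1) }
         *         If (LEqual(Arg0, 0x01)) { Notify(CP01, Arg1) }
         *     }
         *     Name(CPON, Package() { One, Zero })
         *
         * all of which ends up wrapped in Scope(\_SB) when the sb_scope
         * array is packaged up below.
         */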

        if (nr_mem) {
            assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
            /* build memory devices */
            for (i = 0; i < nr_mem; i++) {
                char id[3];
                uint8_t *mem = acpi_data_push(sb_scope, ACPI_MEM_SIZEOF);

                snprintf(id, sizeof(id), "%02X", i);
                memcpy(mem, ACPI_MEM_AML, ACPI_MEM_SIZEOF);
                memcpy(mem + ACPI_MEM_OFFSET_HEX, id, 2);
                memcpy(mem + ACPI_MEM_OFFSET_ID, id, 2);
            }

            /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
             *     If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ...
             */
            build_append_notify_method(sb_scope,
                                       stringify(MEMORY_SLOT_NOTIFY_METHOD),
                                       "MP%0.02X", nr_mem);
        }

        {
            AcpiBuildPciBusHotplugState hotplug_state;
            Object *pci_host;
            PCIBus *bus = NULL;
            bool ambiguous;

            pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE,
                                                &ambiguous);
            if (!ambiguous && pci_host) {
                bus = PCI_HOST_BRIDGE(pci_host)->bus;
            }

            build_pci_bus_state_init(&hotplug_state, NULL, pm->pcihp_bridge_en);

            if (bus) {
                /* Scan all PCI buses. Generate tables to support hotplug. */
                pci_for_each_bus_depth_first(bus, build_pci_bus_begin,
                                             build_pci_bus_end, &hotplug_state);
            }

            build_append_array(sb_scope, hotplug_state.device_table);
            build_pci_bus_state_cleanup(&hotplug_state);
        }

        build_package(sb_scope, op, 3);
        build_append_array(table_data, sb_scope);
        build_free_array(sb_scope);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + ssdt_start),
                 "SSDT", table_data->len - ssdt_start, 1);
}

static void
build_hpet(GArray *table_data, GArray *linker)
{
    Acpi20Hpet *hpet;

    hpet = acpi_data_push(table_data, sizeof(*hpet));
    /* Note timer_block_id value must be kept in sync with value advertised by
     * emulated hpet
     */
    hpet->timer_block_id = cpu_to_le32(0x8086a201);
    hpet->addr.address = cpu_to_le64(HPET_BASE);
    build_header(linker, table_data,
                 (void *)hpet, "HPET", sizeof(*hpet), 1);
}

static void
build_tpm_tcpa(GArray *table_data, GArray *linker, GArray *tcpalog)
{
    Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
    uint64_t log_area_start_address = acpi_data_len(tcpalog);

    tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT);
    tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
    tcpa->log_area_start_address = cpu_to_le64(log_area_start_address);

    bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, 1,
                             false /* high memory */);

    /* log area start address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TPMLOG_FILE,
                                   table_data, &tcpa->log_area_start_address,
                                   sizeof(tcpa->log_area_start_address));

    build_header(linker, table_data,
                 (void *)tcpa, "TCPA", sizeof(*tcpa), 2);

    acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE);
}

static void
build_tpm_ssdt(GArray *table_data, GArray *linker)
{
    void *tpm_ptr;

    tpm_ptr = acpi_data_push(table_data, sizeof(ssdt_tpm_aml));
    memcpy(tpm_ptr, ssdt_tpm_aml, sizeof(ssdt_tpm_aml));
}

typedef enum {
    MEM_AFFINITY_NOFLAGS = 0,
    MEM_AFFINITY_ENABLED = (1 << 0),
    MEM_AFFINITY_HOTPLUGGABLE = (1 << 1),
    MEM_AFFINITY_NON_VOLATILE = (1 << 2),
} MemoryAffinityFlags;

static void
acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
                       uint64_t len, int node, MemoryAffinityFlags flags)
{
    numamem->type = ACPI_SRAT_MEMORY;
    numamem->length = sizeof(*numamem);
    memset(numamem->proximity, 0, 4);
    numamem->proximity[0] = node;
    numamem->flags = cpu_to_le32(flags);
    numamem->base_addr = cpu_to_le64(base);
    numamem->range_length = cpu_to_le64(len);
}

static void
build_srat(GArray *table_data, GArray *linker,
           AcpiCpuInfo *cpu, PcGuestInfo *guest_info)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorAffinity *core;
    AcpiSratMemoryAffinity *numamem;

    int i;
    uint64_t curnode;
    int srat_start, numa_start, slots;
    uint64_t mem_len, mem_base, next_base;
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    ram_addr_t hotpluggable_address_space_size =
        object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
                                NULL);

    srat_start = table_data->len;

    srat = acpi_data_push(table_data, sizeof *srat);
    srat->reserved1 = cpu_to_le32(1);
    core = (void *)(srat + 1);

    for (i = 0; i < guest_info->apic_id_limit; ++i) {
        core = acpi_data_push(table_data, sizeof *core);
        core->type = ACPI_SRAT_PROCESSOR;
        core->length = sizeof(*core);
        core->local_apic_id = i;
        curnode = guest_info->node_cpu[i];
        core->proximity_lo = curnode;
        memset(core->proximity_hi, 0, 3);
        core->local_sapic_eid = 0;
        if (test_bit(i, cpu->found_cpus)) {
            core->flags = cpu_to_le32(1);
        } else {
            core->flags = cpu_to_le32(0);
        }
    }


    /* the memory map is a bit tricky, it contains at least one hole
     * from 640k-1M and possibly another one from 3.5G-4G.
     */
    next_base = 0;
    numa_start = table_data->len;

    numamem = acpi_data_push(table_data, sizeof *numamem);
    acpi_build_srat_memory(numamem, 0, 640*1024, 0, MEM_AFFINITY_ENABLED);
    next_base = 1024 * 1024;
    for (i = 1; i < guest_info->numa_nodes + 1; ++i) {
        mem_base = next_base;
        mem_len = guest_info->node_mem[i - 1];
        if (i == 1) {
            mem_len -= 1024 * 1024;
        }
        next_base = mem_base + mem_len;

        /* Cut out the ACPI_PCI hole */
        if (mem_base <= guest_info->ram_size_below_4g &&
            next_base > guest_info->ram_size_below_4g) {
            mem_len -= next_base - guest_info->ram_size_below_4g;
            if (mem_len > 0) {
                numamem = acpi_data_push(table_data, sizeof *numamem);
                acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
                                       MEM_AFFINITY_ENABLED);
            }
            mem_base = 1ULL << 32;
            mem_len = next_base - guest_info->ram_size_below_4g;
            next_base += (1ULL << 32) - guest_info->ram_size_below_4g;
        }
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
                               MEM_AFFINITY_ENABLED);
    }
    slots = (table_data->len - numa_start) / sizeof *numamem;
    for (; slots < guest_info->numa_nodes + 2; slots++) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
    }

    /*
     * Entry is required for Windows to enable memory hotplug in OS.
     * Memory devices may override proximity set by this entry,
     * providing _PXM method if necessary.
     */
    if (hotpluggable_address_space_size) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, pcms->hotplug_memory_base,
                               hotpluggable_address_space_size, 0,
                               MEM_AFFINITY_HOTPLUGGABLE |
                               MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + srat_start),
                 "SRAT",
                 table_data->len - srat_start, 1);
}

static void
build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
{
    AcpiTableMcfg *mcfg;
    const char *sig;
    int len = sizeof(*mcfg) + 1 * sizeof(mcfg->allocation[0]);

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(info->mcfg_base);
    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = PCIE_MMCFG_BUS(info->mcfg_size - 1);

    /* MCFG is used for ECAM which can be enabled or disabled by guest.
     * To avoid table size changes (which create migration issues),
     * always create the table even if there are no allocations,
     * but set the signature to a reserved value in this case.
     * ACPI spec requires OSPMs to ignore such tables.
     */
    if (info->mcfg_base == PCIE_BASE_ADDR_UNMAPPED) {
        /* Reserved signature: ignored by OSPM */
        sig = "QEMU";
    } else {
        sig = "MCFG";
    }
    build_header(linker, table_data, (void *)mcfg, sig, len, 1);
}

static void
build_dmar_q35(GArray *table_data, GArray *linker)
{
    int dmar_start = table_data->len;

    AcpiTableDmar *dmar;
    AcpiDmarHardwareUnit *drhd;

    dmar = acpi_data_push(table_data, sizeof(*dmar));
    dmar->host_address_width = VTD_HOST_ADDRESS_WIDTH - 1;
    dmar->flags = 0; /* No intr_remap for now */

    /* DMAR Remapping Hardware Unit Definition structure */
    drhd = acpi_data_push(table_data, sizeof(*drhd));
    drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT);
    drhd->length = cpu_to_le16(sizeof(*drhd)); /* No device scope now */
    drhd->flags = ACPI_DMAR_INCLUDE_PCI_ALL;
    drhd->pci_segment = cpu_to_le16(0);
    drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR);

    build_header(linker, table_data, (void *)(table_data->data + dmar_start),
                 "DMAR", table_data->len - dmar_start, 1);
}

static void
build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
{
    AcpiTableHeader *dsdt;

    assert(misc->dsdt_code && misc->dsdt_size);

    dsdt = acpi_data_push(table_data, misc->dsdt_size);
    memcpy(dsdt, misc->dsdt_code, misc->dsdt_size);

    memset(dsdt, 0, sizeof *dsdt);
    build_header(linker, table_data, dsdt, "DSDT",
                 misc->dsdt_size, 1);
}

/* Build final rsdt table */
static void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
{
    AcpiRsdtDescriptorRev1 *rsdt;
    size_t rsdt_len;
    int i;

    rsdt_len = sizeof(*rsdt) + sizeof(uint32_t) * table_offsets->len;
    rsdt = acpi_data_push(table_data, rsdt_len);
    memcpy(rsdt->table_offset_entry, table_offsets->data,
           sizeof(uint32_t) * table_offsets->len);
    for (i = 0; i < table_offsets->len; ++i) {
        /* rsdt->table_offset_entry to be filled by Guest linker */
        bios_linker_loader_add_pointer(linker,
                                       ACPI_BUILD_TABLE_FILE,
                                       ACPI_BUILD_TABLE_FILE,
                                       table_data, &rsdt->table_offset_entry[i],
                                       sizeof(uint32_t));
    }
    build_header(linker, table_data,
                 (void *)rsdt, "RSDT", rsdt_len, 1);
}

static GArray *
build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", 8);
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, 6);
    rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   rsdp_table, &rsdp->rsdt_physical_address,
                                   sizeof rsdp->rsdt_physical_address);
    rsdp->checksum = 0;
    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
                                    rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);

    return rsdp_table;
}
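
/*
 * Editor's note, summarising the mechanism used above (not part of the
 * original code): the "linker" GArray does not hold table data.  It
 * accumulates bios-linker-loader commands -- allocate a named fw_cfg blob,
 * patch a pointer from one blob into another, compute a checksum -- which
 * the guest firmware replays at boot once the final guest-physical addresses
 * of the blobs are known.  QEMU itself only fills in offsets within the
 * blobs it generates.
 */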

typedef
struct AcpiBuildTables {
    GArray *table_data;
    GArray *rsdp;
    GArray *tcpalog;
    GArray *linker;
} AcpiBuildTables;

static inline void acpi_build_tables_init(AcpiBuildTables *tables)
{
    tables->rsdp = g_array_new(false, true /* clear */, 1);
    tables->table_data = g_array_new(false, true /* clear */, 1);
    tables->tcpalog = g_array_new(false, true /* clear */, 1);
    tables->linker = bios_linker_loader_init();
}

static inline void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
{
    void *linker_data = bios_linker_loader_cleanup(tables->linker);
    g_free(linker_data);
    g_array_free(tables->rsdp, mfre);
    g_array_free(tables->table_data, true);
    g_array_free(tables->tcpalog, mfre);
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    uint8_t *table_ram;
    uint32_t table_size;
    /* Is table patched? */
    uint8_t patched;
    PcGuestInfo *guest_info;
} AcpiBuildState;

static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
{
    Object *pci_host;
    QObject *o;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL);
    if (!o) {
        return false;
    }
    mcfg->mcfg_base = qint_get_int(qobject_to_qint(o));
    qobject_decref(o);

    o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL);
    assert(o);
    mcfg->mcfg_size = qint_get_int(qobject_to_qint(o));
    qobject_decref(o);
    return true;
}

static bool acpi_has_iommu(void)
{
    bool ambiguous;
    Object *intel_iommu;

    intel_iommu = object_resolve_path_type("", TYPE_INTEL_IOMMU_DEVICE,
                                           &ambiguous);
    return intel_iommu && !ambiguous;
}

static
void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
{
    GArray *table_offsets;
    unsigned facs, ssdt, dsdt, rsdt;
    AcpiCpuInfo cpu;
    AcpiPmInfo pm;
    AcpiMiscInfo misc;
    AcpiMcfgInfo mcfg;
    PcPciInfo pci;
    uint8_t *u;
    size_t aml_len = 0;

    acpi_get_cpu_info(&cpu);
    acpi_get_pm_info(&pm);
    acpi_get_dsdt(&misc);
    acpi_get_misc_info(&misc);
    acpi_get_pci_info(&pci);

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));
    ACPI_BUILD_DPRINTF(3, "init ACPI tables\n");

    bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE,
                             64 /* Ensure FACS is aligned */,
                             false /* high memory */);

    /*
     * FACS is pointed to by FADT.
     * We place it first since it's the only table that has alignment
     * requirements.
     */
    facs = tables->table_data->len;
    build_facs(tables->table_data, tables->linker, guest_info);

    /* DSDT is pointed to by FADT */
    dsdt = tables->table_data->len;
    build_dsdt(tables->table_data, tables->linker, &misc);

    /* Count the size of the DSDT and SSDT, we will need it for legacy
     * sizing of ACPI tables.
     */
    aml_len += tables->table_data->len - dsdt;

    /* ACPI tables pointed to by RSDT */
    acpi_add_table(table_offsets, tables->table_data);
    build_fadt(tables->table_data, tables->linker, &pm, facs, dsdt);

    ssdt = tables->table_data->len;
    acpi_add_table(table_offsets, tables->table_data);
    build_ssdt(tables->table_data, tables->linker, &cpu, &pm, &misc, &pci,
               guest_info);
    aml_len += tables->table_data->len - ssdt;

    acpi_add_table(table_offsets, tables->table_data);
    build_madt(tables->table_data, tables->linker, &cpu, guest_info);

    if (misc.has_hpet) {
        acpi_add_table(table_offsets, tables->table_data);
        build_hpet(tables->table_data, tables->linker);
    }
    if (misc.has_tpm) {
        acpi_add_table(table_offsets, tables->table_data);
        build_tpm_tcpa(tables->table_data, tables->linker, tables->tcpalog);

        acpi_add_table(table_offsets, tables->table_data);
        build_tpm_ssdt(tables->table_data, tables->linker);
    }
    if (guest_info->numa_nodes) {
        acpi_add_table(table_offsets, tables->table_data);
        build_srat(tables->table_data, tables->linker, &cpu, guest_info);
    }
    if (acpi_get_mcfg(&mcfg)) {
        acpi_add_table(table_offsets, tables->table_data);
        build_mcfg_q35(tables->table_data, tables->linker, &mcfg);
    }
    if (acpi_has_iommu()) {
        acpi_add_table(table_offsets, tables->table_data);
        build_dmar_q35(tables->table_data, tables->linker);
    }

    /* Add tables supplied by user (if any) */
    for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
        unsigned len = acpi_table_len(u);

        acpi_add_table(table_offsets, tables->table_data);
        g_array_append_vals(tables->table_data, u, len);
    }

    /* RSDT is pointed to by RSDP */
    rsdt = tables->table_data->len;
    build_rsdt(tables->table_data, tables->linker, table_offsets);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, rsdt);

    /* We'll expose it all to Guest so we want to reduce
     * chance of size changes.
     * RSDP is small so it's easy to keep it immutable, no need to
     * bother with alignment.
     *
     * We used to align the tables to 4k, but of course this would be
     * too simple to be enough.  4k turned out to be too small an
     * alignment very soon, and in fact it is almost impossible to
     * keep the table size stable for all (max_cpus, max_memory_slots)
     * combinations.  So the table size is always 64k for pc-i440fx-2.1
     * and we give an error if the table grows beyond that limit.
     *
     * We still have the problem of migrating from "-M pc-i440fx-2.0".  For
     * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables
     * than 2.0 and we can always pad the smaller tables with zeros.  We can
     * then use the exact size of the 2.0 tables.
     *
     * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration.
     */
    if (guest_info->legacy_acpi_table_size) {
        /* Subtracting aml_len gives the size of fixed tables.  Then add the
         * size of the PIIX4 DSDT/SSDT in QEMU 2.0.
1673 */ 1674 int legacy_aml_len = 1675 guest_info->legacy_acpi_table_size + 1676 ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus; 1677 int legacy_table_size = 1678 ROUND_UP(tables->table_data->len - aml_len + legacy_aml_len, 1679 ACPI_BUILD_ALIGN_SIZE); 1680 if (tables->table_data->len > legacy_table_size) { 1681 /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ 1682 error_report("Warning: migration may not work."); 1683 } 1684 g_array_set_size(tables->table_data, legacy_table_size); 1685 } else { 1686 /* Make sure we have a buffer in case we need to resize the tables. */ 1687 if (tables->table_data->len > ACPI_BUILD_TABLE_SIZE / 2) { 1688 /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ 1689 error_report("Warning: ACPI tables are larger than 64k."); 1690 error_report("Warning: migration may not work."); 1691 error_report("Warning: please remove CPUs, NUMA nodes, " 1692 "memory slots or PCI bridges."); 1693 } 1694 acpi_align_size(tables->table_data, ACPI_BUILD_TABLE_SIZE); 1695 } 1696 1697 acpi_align_size(tables->linker, ACPI_BUILD_ALIGN_SIZE); 1698 1699 /* Cleanup memory that's no longer used. */ 1700 g_array_free(table_offsets, true); 1701 } 1702 1703 static void acpi_build_update(void *build_opaque, uint32_t offset) 1704 { 1705 AcpiBuildState *build_state = build_opaque; 1706 AcpiBuildTables tables; 1707 1708 /* No state to update or already patched? Nothing to do. */ 1709 if (!build_state || build_state->patched) { 1710 return; 1711 } 1712 build_state->patched = 1; 1713 1714 acpi_build_tables_init(&tables); 1715 1716 acpi_build(build_state->guest_info, &tables); 1717 1718 assert(acpi_data_len(tables.table_data) == build_state->table_size); 1719 memcpy(build_state->table_ram, tables.table_data->data, 1720 build_state->table_size); 1721 1722 acpi_build_tables_cleanup(&tables, true); 1723 } 1724 1725 static void acpi_build_reset(void *build_opaque) 1726 { 1727 AcpiBuildState *build_state = build_opaque; 1728 build_state->patched = 0; 1729 } 1730 1731 static void *acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob, 1732 const char *name) 1733 { 1734 return rom_add_blob(name, blob->data, acpi_data_len(blob), -1, name, 1735 acpi_build_update, build_state); 1736 } 1737 1738 static const VMStateDescription vmstate_acpi_build = { 1739 .name = "acpi_build", 1740 .version_id = 1, 1741 .minimum_version_id = 1, 1742 .fields = (VMStateField[]) { 1743 VMSTATE_UINT8(patched, AcpiBuildState), 1744 VMSTATE_END_OF_LIST() 1745 }, 1746 }; 1747 1748 void acpi_setup(PcGuestInfo *guest_info) 1749 { 1750 AcpiBuildTables tables; 1751 AcpiBuildState *build_state; 1752 1753 if (!guest_info->fw_cfg) { 1754 ACPI_BUILD_DPRINTF(3, "No fw cfg. Bailing out.\n"); 1755 return; 1756 } 1757 1758 if (!guest_info->has_acpi_build) { 1759 ACPI_BUILD_DPRINTF(3, "ACPI build disabled. Bailing out.\n"); 1760 return; 1761 } 1762 1763 if (!acpi_enabled) { 1764 ACPI_BUILD_DPRINTF(3, "ACPI disabled. 

static void acpi_build_update(void *build_opaque, uint32_t offset)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = 1;

    acpi_build_tables_init(&tables);

    acpi_build(build_state->guest_info, &tables);

    assert(acpi_data_len(tables.table_data) == build_state->table_size);
    memcpy(build_state->table_ram, tables.table_data->data,
           build_state->table_size);

    acpi_build_tables_cleanup(&tables, true);
}

static void acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = 0;
}

static void *acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob,
                               const char *name)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), -1, name,
                        acpi_build_update, build_state);
}

static const VMStateDescription vmstate_acpi_build = {
    .name = "acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

void acpi_setup(PcGuestInfo *guest_info)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!guest_info->fw_cfg) {
        ACPI_BUILD_DPRINTF(3, "No fw cfg. Bailing out.\n");
        return;
    }

    if (!guest_info->has_acpi_build) {
        ACPI_BUILD_DPRINTF(3, "ACPI build disabled. Bailing out.\n");
        return;
    }

    if (!acpi_enabled) {
        ACPI_BUILD_DPRINTF(3, "ACPI disabled. Bailing out.\n");
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    build_state->guest_info = guest_info;

    acpi_set_pci_info();

    acpi_build_tables_init(&tables);
    acpi_build(build_state->guest_info, &tables);

    /* Now expose it all to Guest */
    build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data,
                                               ACPI_BUILD_TABLE_FILE);
    build_state->table_size = acpi_data_len(tables.table_data);

    acpi_add_rom_blob(NULL, tables.linker, "etc/table-loader");

    fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
                    tables.tcpalog->data, acpi_data_len(tables.tcpalog));

    /*
     * RSDP is small so it's easy to keep it immutable, no need to
     * bother with ROM blobs.
     */
    fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_RSDP_FILE,
                    tables.rsdp->data, acpi_data_len(tables.rsdp));

    qemu_register_reset(acpi_build_reset, build_state);
    acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}