xref: /openbmc/qemu/hw/arm/virt-acpi-build.c (revision 922d42bb)
/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "hw/core/cpu.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/acpi/tpm.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/gpex.h"
#include "hw/arm/virt.h"
#include "hw/mem/nvdimm.h"
#include "hw/platform-bus.h"
#include "sysemu/numa.h"
#include "sysemu/reset.h"
#include "sysemu/tpm.h"
#include "kvm_arm.h"
#include "migration/vmstate.h"
#include "hw/acpi/ghes.h"

#define ARM_SPI_BASE 32

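/*
 * Add a Processor Device object (_HID "ACPI0007") under \_SB for each
 * present CPU; _UID mirrors the MADT GICC uid so the OS can match them.
 */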
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

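/*
 * PL011 UART (_HID "ARMH0011"): a fixed MMIO window plus one
 * level-triggered SPI, matching the VIRT_UART memmap and irqmap entries.
 */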
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    aml_append(scope, dev);
}

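/*
 * fw_cfg MMIO interface (_HID "QEMU0002"): lets the guest locate the
 * fw_cfg registers; _CCA = 1 marks its DMA as cache coherent.
 */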
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

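/*
 * Flash: the single VIRT_FLASH memmap entry covers both banks, so split it
 * in half and describe each bank as its own device (FLS0/FLS1).
 */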
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

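/*
 * virtio-mmio transports (_HID "LNRO0005"): one device node per transport,
 * laid out back to back in guest memory with consecutive SPIs.
 */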
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

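/*
 * PCIe host bridge: fill in a GPEXConfig from the memmap (32-bit MMIO, PIO,
 * ECAM and, when enabled, the high MMIO window) and let the shared GPEX
 * helper emit the DSDT description.
 */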
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    struct GPEXConfig cfg = {
        .mmio32 = memmap[VIRT_PCIE_MMIO],
        .pio    = memmap[VIRT_PCIE_PIO],
        .ecam   = memmap[ecam_id],
        .irq    = irq,
    };

    if (use_highmem) {
        cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO];
    }

    acpi_dsdt_add_gpex(scope, &cfg);
}

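/*
 * PL061 GPIO controller (_HID "ARMH0061"): pin 3 is wired up as the ACPI
 * power button via an _AEI GPIO interrupt and the matching _E03 handler.
 */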
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the event handler for the power button (GPIO pin 3) */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

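/*
 * TPM on the platform bus: translate the device's platform-bus offset into
 * a guest physical address and expose it as a TPM device (_HID "MSFT0101").
 * Nothing is generated if no TPM backend is present.
 */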
static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms)
{
    PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev);
    hwaddr pbus_base = vms->memmap[VIRT_PLATFORM_BUS].base;
    SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find());
    MemoryRegion *sbdev_mr;
    hwaddr tpm_base;

    if (!sbdev) {
        return;
    }

    tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
    assert(tpm_base != -1);

    tpm_base += pbus_base;

    sbdev_mr = sysbus_mmio_get_region(sbdev, 0);

    Aml *dev = aml_device("TPM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs,
               aml_memory32_fixed(tpm_base,
                                  (uint32_t)memory_region_size(sbdev_mr),
                                  AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

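/*
 * IORT: describe the ITS group, the optional SMMUv3 and the PCIe root
 * complex, chained together with identity ID mappings so that MSIs (and,
 * with an SMMU, DMA) from any requester ID are routed to the ITS.
 */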
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int nb_nodes, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        nb_nodes = 3; /* RC, ITS, SMMUv3 */
    } else {
        nb_nodes = 2; /* RC, ITS */
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size =  sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq =  vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->gerr_gsiv = cpu_to_le32(irq + 2);
        smmu->sync_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        /* output IORT node is the smmuv3 node */
        idmap->output_reference = cpu_to_le32(smmu_offset);
    } else {
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}

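/*
 * SPCR (rev 2): point the OS at the PL011 as its serial console, using the
 * same MMIO base and GIC interrupt as the DSDT COM0 device.
 */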
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, NULL, NULL);
}

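/*
 * SRAT: one GICC affinity entry per possible CPU and one memory affinity
 * range per NUMA node, plus optional NVDIMM and hotpluggable device-memory
 * ranges.
 */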
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    MachineState *ms = MACHINE(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms);

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < ms->numa_state->num_nodes; ++i) {
        if (ms->numa_state->nodes[i].node_mem > 0) {
            numamem = acpi_data_push(table_data, sizeof(*numamem));
            build_srat_memory(numamem, mem_base,
                              ms->numa_state->nodes[i].node_mem, i,
                              MEM_AFFINITY_ENABLED);
            mem_base += ms->numa_state->nodes[i].node_mem;
        }
    }

    if (ms->nvdimms_state->is_enabled) {
        nvdimm_build_srat(table_data);
    }

    if (ms->device_memory) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, ms->device_memory->base,
                          memory_region_size(&ms->device_memory->mr),
                          ms->numa_state->num_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, NULL, NULL);
}

/* GTDT */
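/*
 * Per-CPU arch timer interrupts (secure and non-secure EL1, virtual, EL2),
 * flagged level- or edge-triggered to match the machine-type compatibility
 * setting.
 */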
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* Same interrupt values as in the device tree, plus 16 (PPI -> GSIV) */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}

/* MADT */
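/*
 * One GIC distributor entry, one GICC per CPU (with PMU and VGIC maintenance
 * interrupts where applicable), then either GICv3 redistributor regions plus
 * an optional ITS, or a GICv2m MSI frame.
 */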
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    acpi_data_push(table_data, sizeof(AcpiMultipleApicTable));

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
            gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
            gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                         sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address =
                cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base);
            gicr->range_length =
                cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}

/* FADT */
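/*
 * Hardware-reduced ACPI v5.1 FADT; arm_boot_arch advertises PSCI and whether
 * the HVC or SMC conduit is in use.
 */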
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, NULL, NULL);
}

/* DSDT */
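/*
 * The DSDT pulls together the per-device helpers above: CPUs, UART, optional
 * flash, fw_cfg, virtio-mmio, PCIe, GED (or the GPIO power button), memory
 * hotplug and TPM, all under the \_SB scope.
 */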
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    Aml *scope, *dsdt;
    MachineState *ms = MACHINE(vms);
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    if (vmc->acpi_expose_flash) {
        acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    }
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam);
    if (vms->acpi_dev) {
        build_ged_aml(scope, "\\_SB."GED_DEVICE,
                      HOTPLUG_HANDLER(vms->acpi_dev),
                      irqmap[VIRT_ACPI_GED] + ARM_SPI_BASE, AML_SYSTEM_MEMORY,
                      memmap[VIRT_ACPI_GED].base);
    } else {
        acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                           (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    }

    if (vms->acpi_dev) {
        uint32_t event = object_property_get_uint(OBJECT(vms->acpi_dev),
                                                  "ged-event", &error_abort);

        if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
            build_memory_hotplug_aml(scope, ms->ram_slots, "\\_SB", NULL,
                                     AML_SYSTEM_MEMORY,
                                     memmap[VIRT_PCDIMM_ACPI].base);
        }
    }

    acpi_dsdt_add_power_button(scope);
    acpi_dsdt_add_tpm(scope, vms);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

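/*
 * Build the complete table set for the current machine state into 'tables':
 * the DSDT first (it is referenced by the FADT), then every XSDT-listed
 * table, and finally the XSDT and RSDP themselves.
 */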
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;
    MachineState *ms = MACHINE(vms);

    table_offsets = g_array_new(false, true /* clear */,
                                        sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by XSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    {
        AcpiMcfgInfo mcfg = {
           .base = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].base,
           .size = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].size,
        };
        build_mcfg(tables_blob, tables->linker, &mcfg);
    }

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (vms->ras) {
        build_ghes_error_table(tables->hardware_errors, tables->linker);
        acpi_add_table(table_offsets, tables_blob);
        acpi_build_hest(tables_blob, tables->linker);
    }

    if (ms->numa_state->num_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (ms->numa_state->have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker, ms);
        }
    }

    if (ms->nvdimms_state->is_enabled) {
        nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
                          ms->nvdimms_state, ms->ram_slots);
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

    if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) {
        acpi_add_table(table_offsets, tables_blob);
        build_tpm2(tables_blob, tables->linker, tables->tcpalog);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    {
        AcpiRsdpData rsdp_data = {
            .revision = 2,
            .oem_id = ACPI_BUILD_APPNAME6,
            .xsdt_tbl_offset = &xsdt,
            .rsdt_tbl_offset = NULL,
        };
        build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
    }

    /* Clean up memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure the RAM size is correct, in case it was changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

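/*
 * ROM blob update callback: rebuild the tables against the current machine
 * state and patch them into the guest-visible memory regions. The 'patched'
 * flag keeps this to one rebuild per reset.
 */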
static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

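/*
 * Machine-init entry point: build the tables once and expose them to the
 * guest as fw_cfg ROM blobs, registering reset and vmstate handlers so they
 * can be re-patched later. Does nothing without fw_cfg or with ACPI disabled.
 */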
void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;
    AcpiGedState *acpi_ged_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!virt_is_acpi_enabled(vms)) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to the guest */
    build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                              build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(virt_acpi_build_update, build_state,
                          tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE, 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    if (vms->ras) {
        assert(vms->acpi_dev);
        acpi_ged_state = ACPI_GED(vms->acpi_dev);
        acpi_ghes_add_fw_cfg(&acpi_ged_state->ghes_state,
                             vms->fw_cfg, tables.hardware_errors);
    }

    build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                             build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Clean up the tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}