xref: /openbmc/qemu/hw/acpi/nvdimm.c (revision 45a994944a8773e67adaede8cc15a207f85dbef0)
1 /*
2  * NVDIMM ACPI Implementation
3  *
4  * Copyright(C) 2015 Intel Corporation.
5  *
6  * Author:
7  *  Xiao Guangrong <guangrong.xiao@linux.intel.com>
8  *
9  * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
10  * and the DSM specification can be found at:
11  *       http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
12  *
13  * Currently, it only supports PMEM Virtualization.
14  *
15  * This library is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU Lesser General Public
17  * License as published by the Free Software Foundation; either
18  * version 2 of the License, or (at your option) any later version.
19  *
20  * This library is distributed in the hope that it will be useful,
21  * but WITHOUT ANY WARRANTY; without even the implied warranty of
22  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23  * Lesser General Public License for more details.
24  *
25  * You should have received a copy of the GNU Lesser General Public
26  * License along with this library; if not, see <http://www.gnu.org/licenses/>
27  */
28 
29 #include "qemu/osdep.h"
30 #include "hw/acpi/acpi.h"
31 #include "hw/acpi/aml-build.h"
32 #include "hw/acpi/bios-linker-loader.h"
33 #include "hw/nvram/fw_cfg.h"
34 #include "hw/mem/nvdimm.h"
35 
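/*
 * Callback for object_child_foreach(): append the object to the list if it
 * is an NVDIMM device, then recurse into its children so that every plugged
 * NVDIMM under the machine is collected.
 */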
36 static int nvdimm_plugged_device_list(Object *obj, void *opaque)
37 {
38     GSList **list = opaque;
39 
40     if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
41         *list = g_slist_append(*list, DEVICE(obj));
42     }
43 
44     object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
45     return 0;
46 }
47 
48 /*
 * Inquire about the plugged NVDIMM devices and link them into the list
 * which is returned to the caller.
 *
 * Note: it is the caller's responsibility to free the list to avoid a
 * memory leak.
54  */
55 static GSList *nvdimm_get_plugged_device_list(void)
56 {
57     GSList *list = NULL;
58 
59     object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list,
60                          &list);
61     return list;
62 }
63 
64 #define NVDIMM_UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)             \
65    { (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
66      (b) & 0xff, ((b) >> 8) & 0xff, (c) & 0xff, ((c) >> 8) & 0xff,          \
67      (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }
68 
69 /*
70  * define Byte Addressable Persistent Memory (PM) Region according to
71  * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
72  */
73 static const uint8_t nvdimm_nfit_spa_uuid[] =
74       NVDIMM_UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
75                      0x18, 0xb7, 0x8c, 0xdb);
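/*
 * The definition above expands to the byte sequence 79 d3 f0 66 f3 b4 74 40
 * ac 43 0d 33 18 b7 8c db, i.e. the little-endian encoding of GUID
 * 66F0D379-B4F3-4074-AC43-0D3318B78CDB used for PMEM regions in the NFIT.
 */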
76 
77 /*
78  * NVDIMM Firmware Interface Table
79  * @signature: "NFIT"
80  *
 * It provides information that allows OSPM to enumerate the NVDIMMs present
 * in the platform and associate them with the system physical address
 * ranges created by the NVDIMMs.
84  *
85  * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
86  */
87 struct NvdimmNfitHeader {
88     ACPI_TABLE_HEADER_DEF
89     uint32_t reserved;
90 } QEMU_PACKED;
91 typedef struct NvdimmNfitHeader NvdimmNfitHeader;
92 
93 /*
94  * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
95  * Interface Table (NFIT).
96  */
97 
98 /*
99  * System Physical Address Range Structure
100  *
101  * It describes the system physical address ranges occupied by NVDIMMs and
102  * the types of the regions.
103  */
104 struct NvdimmNfitSpa {
105     uint16_t type;
106     uint16_t length;
107     uint16_t spa_index;
108     uint16_t flags;
109     uint32_t reserved;
110     uint32_t proximity_domain;
111     uint8_t type_guid[16];
112     uint64_t spa_base;
113     uint64_t spa_length;
114     uint64_t mem_attr;
115 } QEMU_PACKED;
116 typedef struct NvdimmNfitSpa NvdimmNfitSpa;
117 
118 /*
119  * Memory Device to System Physical Address Range Mapping Structure
120  *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * structure describing the memory interleave.
123  */
124 struct NvdimmNfitMemDev {
125     uint16_t type;
126     uint16_t length;
127     uint32_t nfit_handle;
128     uint16_t phys_id;
129     uint16_t region_id;
130     uint16_t spa_index;
131     uint16_t dcr_index;
132     uint64_t region_len;
133     uint64_t region_offset;
134     uint64_t region_dpa;
135     uint16_t interleave_index;
136     uint16_t interleave_ways;
137     uint16_t flags;
138     uint16_t reserved;
139 } QEMU_PACKED;
140 typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;
141 
142 /*
143  * NVDIMM Control Region Structure
144  *
 * It describes the NVDIMM and, if applicable, the Block Control Window.
146  */
147 struct NvdimmNfitControlRegion {
148     uint16_t type;
149     uint16_t length;
150     uint16_t dcr_index;
151     uint16_t vendor_id;
152     uint16_t device_id;
153     uint16_t revision_id;
154     uint16_t sub_vendor_id;
155     uint16_t sub_device_id;
156     uint16_t sub_revision_id;
157     uint8_t reserved[6];
158     uint32_t serial_number;
159     uint16_t fic;
160     uint16_t num_bcw;
161     uint64_t bcw_size;
162     uint64_t cmd_offset;
163     uint64_t cmd_size;
164     uint64_t status_offset;
165     uint64_t status_size;
166     uint16_t flags;
167     uint8_t reserved2[6];
168 } QEMU_PACKED;
169 typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;
170 
171 /*
 * The module serial number is a unique number for each device. We use the
 * slot id of the NVDIMM device to generate this number so that each device
 * is associated with a different number.
175  *
176  * 0x123456 is a magic number we arbitrarily chose.
177  */
178 static uint32_t nvdimm_slot_to_sn(int slot)
179 {
180     return 0x123456 + slot;
181 }
182 
183 /*
 * The handle is used to uniquely associate an nfit_memdev structure with an
 * NVDIMM ACPI device - nfit_memdev.nfit_handle matches the value returned
 * by the ACPI device's _ADR method.
 *
 * We generate the handle from the slot id of the NVDIMM device and reserve
 * 0 for the NVDIMM root device.
190  */
191 static uint32_t nvdimm_slot_to_handle(int slot)
192 {
193     return slot + 1;
194 }
195 
196 /*
 * The index uniquely identifies the structure; 0 is reserved and indicates
 * that the structure is not valid or the associated structure is not
 * present.
 *
 * Each NVDIMM device needs two indexes, one for nfit_spa and another for
 * nfit_dcr, which are generated from the slot id of the NVDIMM device.
203  */
204 static uint16_t nvdimm_slot_to_spa_index(int slot)
205 {
206     return (slot + 1) << 1;
207 }
208 
209 /* See the comments of nvdimm_slot_to_spa_index(). */
210 static uint32_t nvdimm_slot_to_dcr_index(int slot)
211 {
212     return nvdimm_slot_to_spa_index(slot) + 1;
213 }
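/*
 * For example, with the helpers above the NVDIMM in slot 0 gets handle 1,
 * spa_index 2 and dcr_index 3, while slot 1 gets handle 2, spa_index 4 and
 * dcr_index 5.
 */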
214 
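/*
 * Find the plugged NVDIMM device whose slot maps to the given NFIT handle.
 * Returns NULL if no plugged device matches the handle.
 */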
215 static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
216 {
217     NVDIMMDevice *nvdimm = NULL;
218     GSList *list, *device_list = nvdimm_get_plugged_device_list();
219 
220     for (list = device_list; list; list = list->next) {
221         NVDIMMDevice *nvd = list->data;
222         int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
223                                            NULL);
224 
225         if (nvdimm_slot_to_handle(slot) == handle) {
226             nvdimm = nvd;
227             break;
228         }
229     }
230 
231     g_slist_free(device_list);
232     return nvdimm;
233 }
234 
235 /* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
236 static void
237 nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
238 {
239     NvdimmNfitSpa *nfit_spa;
240     uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
241                                             NULL);
242     uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
243                                             NULL);
244     uint32_t node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP,
245                                             NULL);
246     int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
247                                             NULL);
248 
249     nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));
250 
251     nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
252                                       Structure */);
253     nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
254     nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
255 
256     /*
     * The control region is strictly for management since all the device
     * info, such as SN and index, is associated with the slot id.
259      */
260     nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
261                                        management during hot add/online
262                                        operation */ |
263                                   2 /* Data in Proximity Domain field is
264                                        valid*/);
265 
266     /* NUMA node. */
267     nfit_spa->proximity_domain = cpu_to_le32(node);
268     /* the region reported as PMEM. */
269     memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
270            sizeof(nvdimm_nfit_spa_uuid));
271 
272     nfit_spa->spa_base = cpu_to_le64(addr);
273     nfit_spa->spa_length = cpu_to_le64(size);
274 
275     /* It is the PMEM and can be cached as writeback. */
276     nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
277                                      0x8000ULL /* EFI_MEMORY_NV */);
278 }
279 
280 /*
281  * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
282  * Structure
283  */
284 static void
285 nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
286 {
287     NvdimmNfitMemDev *nfit_memdev;
288     uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
289                                             NULL);
290     int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
291                                             NULL);
292     uint32_t handle = nvdimm_slot_to_handle(slot);
293 
294     nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));
295 
296     nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
297                                          Range Map Structure*/);
298     nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
299     nfit_memdev->nfit_handle = cpu_to_le32(handle);
300 
301     /*
302      * associate memory device with System Physical Address Range
303      * Structure.
304      */
305     nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
306     /* associate memory device with Control Region Structure. */
307     nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));
308 
309     /* The memory region on the device. */
310     nfit_memdev->region_len = cpu_to_le64(size);
311     /* The device address starts from 0. */
312     nfit_memdev->region_dpa = cpu_to_le64(0);
313 
314     /* Only one interleave for PMEM. */
315     nfit_memdev->interleave_ways = cpu_to_le16(1);
316 }
317 
318 /*
319  * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
320  */
321 static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
322 {
323     NvdimmNfitControlRegion *nfit_dcr;
324     int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
325                                        NULL);
326     uint32_t sn = nvdimm_slot_to_sn(slot);
327 
328     nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));
329 
330     nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
331     nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
332     nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));
333 
334     /* vendor: Intel. */
335     nfit_dcr->vendor_id = cpu_to_le16(0x8086);
336     nfit_dcr->device_id = cpu_to_le16(1);
337 
338     /* The _DSM method is following Intel's DSM specification. */
339     nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
340                                              in ACPI 6.0 is 1. */);
341     nfit_dcr->serial_number = cpu_to_le32(sn);
342     nfit_dcr->fic = cpu_to_le16(0x201 /* Format Interface Code. See Chapter
343                                          2: NVDIMM Device Specific Method
344                                          (DSM) in DSM Spec Rev1.*/);
345 }
346 
347 static GArray *nvdimm_build_device_structure(void)
348 {
349     GSList *device_list = nvdimm_get_plugged_device_list();
350     GArray *structures = g_array_new(false, true /* clear */, 1);
351 
352     for (; device_list; device_list = device_list->next) {
353         DeviceState *dev = device_list->data;
354 
355         /* build System Physical Address Range Structure. */
356         nvdimm_build_structure_spa(structures, dev);
357 
358         /*
359          * build Memory Device to System Physical Address Range Mapping
360          * Structure.
361          */
362         nvdimm_build_structure_memdev(structures, dev);
363 
364         /* build NVDIMM Control Region Structure. */
365         nvdimm_build_structure_dcr(structures, dev);
366     }
367     g_slist_free(device_list);
368 
369     return structures;
370 }
371 
372 static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
373 {
374     fit_buf->fit = g_array_new(false, true /* clear */, 1);
375 }
376 
377 static void nvdimm_build_fit_buffer(NvdimmFitBuffer *fit_buf)
378 {
379     g_array_free(fit_buf->fit, true);
380     fit_buf->fit = nvdimm_build_device_structure();
381     fit_buf->dirty = true;
382 }
383 
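/*
 * Rebuild the FIT buffer after an NVDIMM device change (e.g. hot plug) so
 * that subsequent Read FIT calls from the guest see the updated device list.
 */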
384 void nvdimm_acpi_hotplug(AcpiNVDIMMState *state)
385 {
386     nvdimm_build_fit_buffer(&state->fit_buf);
387 }
388 
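/* Build the NFIT table from the cached FIT buffer and add it to table_data. */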
389 static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
390                               GArray *table_data, BIOSLinker *linker)
391 {
392     NvdimmFitBuffer *fit_buf = &state->fit_buf;
393     unsigned int header;
394 
395     acpi_add_table(table_offsets, table_data);
396 
397     /* NFIT header. */
398     header = table_data->len;
399     acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
400     /* NVDIMM device structures. */
401     g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
402 
403     build_header(linker, table_data,
404                  (void *)(table_data->data + header), "NFIT",
405                  sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
406 }
407 
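/*
 * Layout of the 4 KiB DSM buffer shared between the ACPI AML code and QEMU
 * (see docs/specs/acpi_nvdimm.txt): the guest fills in handle, revision,
 * function and arg3, and QEMU overwrites the same page with the output
 * described by NvdimmDsmOut.
 */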
408 struct NvdimmDsmIn {
409     uint32_t handle;
410     uint32_t revision;
411     uint32_t function;
412     /* the remaining size in the page is used by arg3. */
413     union {
414         uint8_t arg3[4084];
415     };
416 } QEMU_PACKED;
417 typedef struct NvdimmDsmIn NvdimmDsmIn;
418 QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != 4096);
419 
420 struct NvdimmDsmOut {
421     /* the size of buffer filled by QEMU. */
422     uint32_t len;
423     uint8_t data[4092];
424 } QEMU_PACKED;
425 typedef struct NvdimmDsmOut NvdimmDsmOut;
426 QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != 4096);
427 
428 struct NvdimmDsmFunc0Out {
429     /* the size of buffer filled by QEMU. */
430      uint32_t len;
431      uint32_t supported_func;
432 } QEMU_PACKED;
433 typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;
434 
435 struct NvdimmDsmFuncNoPayloadOut {
436     /* the size of buffer filled by QEMU. */
437      uint32_t len;
438      uint32_t func_ret_status;
439 } QEMU_PACKED;
440 typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;
441 
442 struct NvdimmFuncGetLabelSizeOut {
443     /* the size of buffer filled by QEMU. */
444     uint32_t len;
445     uint32_t func_ret_status; /* return status code. */
446     uint32_t label_size; /* the size of label data area. */
447     /*
448      * Maximum size of the namespace label data length supported by
449      * the platform in Get/Set Namespace Label Data functions.
450      */
451     uint32_t max_xfer;
452 } QEMU_PACKED;
453 typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
454 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > 4096);
455 
456 struct NvdimmFuncGetLabelDataIn {
457     uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be read via the function. */
459 } QEMU_PACKED;
460 typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
461 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
462                   offsetof(NvdimmDsmIn, arg3) > 4096);
463 
464 struct NvdimmFuncGetLabelDataOut {
465     /* the size of buffer filled by QEMU. */
466     uint32_t len;
467     uint32_t func_ret_status; /* return status code. */
    uint8_t out_buf[0]; /* data read via Get Namespace Label Data function. */
469 } QEMU_PACKED;
470 typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
471 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > 4096);
472 
473 struct NvdimmFuncSetLabelDataIn {
474     uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be written via the function. */
476     uint8_t in_buf[0]; /* the data written to label data area. */
477 } QEMU_PACKED;
478 typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
479 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
480                   offsetof(NvdimmDsmIn, arg3) > 4096);
481 
482 struct NvdimmFuncReadFITIn {
483     uint32_t offset; /* the offset of FIT buffer. */
484 } QEMU_PACKED;
485 typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
486 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
487                   offsetof(NvdimmDsmIn, arg3) > 4096);
488 
489 struct NvdimmFuncReadFITOut {
490     /* the size of buffer filled by QEMU. */
491     uint32_t len;
492     uint32_t func_ret_status; /* return status code. */
493     uint8_t fit[0]; /* the FIT data. */
494 } QEMU_PACKED;
495 typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
496 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > 4096);
497 
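/*
 * Write the reply to function 0 (the bitmask of supported functions) into
 * the DSM output buffer in guest memory.
 */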
498 static void
499 nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
500 {
501     NvdimmDsmFunc0Out func0 = {
502         .len = cpu_to_le32(sizeof(func0)),
503         .supported_func = cpu_to_le32(supported_func),
504     };
505     cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
506 }
507 
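/*
 * Write a reply that carries only a status code (no payload) into the DSM
 * output buffer in guest memory.
 */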
508 static void
509 nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
510 {
511     NvdimmDsmFuncNoPayloadOut out = {
512         .len = cpu_to_le32(sizeof(out)),
513         .func_ret_status = cpu_to_le32(func_ret_status),
514     };
515     cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
516 }
517 
518 #define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
519 
520 /* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
521 static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in,
522                                      hwaddr dsm_mem_addr)
523 {
524     NvdimmFitBuffer *fit_buf = &state->fit_buf;
525     NvdimmFuncReadFITIn *read_fit;
526     NvdimmFuncReadFITOut *read_fit_out;
527     GArray *fit;
528     uint32_t read_len = 0, func_ret_status;
529     int size;
530 
531     read_fit = (NvdimmFuncReadFITIn *)in->arg3;
532     le32_to_cpus(&read_fit->offset);
533 
534     fit = fit_buf->fit;
535 
536     nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n",
537                  read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");
538 
539     if (read_fit->offset > fit->len) {
540         func_ret_status = 3 /* Invalid Input Parameters */;
541         goto exit;
542     }
543 
    /* This is the first time the FIT is read. */
545     if (!read_fit->offset) {
546         fit_buf->dirty = false;
547     } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
548         func_ret_status = 0x100 /* fit changed */;
549         goto exit;
550     }
551 
552     func_ret_status = 0 /* Success */;
553     read_len = MIN(fit->len - read_fit->offset,
554                    4096 - sizeof(NvdimmFuncReadFITOut));
555 
556 exit:
557     size = sizeof(NvdimmFuncReadFITOut) + read_len;
558     read_fit_out = g_malloc(size);
559 
560     read_fit_out->len = cpu_to_le32(size);
561     read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
562     memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);
563 
564     cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);
565 
566     g_free(read_fit_out);
567 }
568 
569 static void nvdimm_dsm_reserved_root(AcpiNVDIMMState *state, NvdimmDsmIn *in,
570                                      hwaddr dsm_mem_addr)
571 {
572     switch (in->function) {
573     case 0x0:
574         nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
575         return;
    case 0x1 /* Read FIT */:
577         nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
578         return;
579     }
580 
581     nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
582 }
583 
584 static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
585 {
586     /*
     * function 0 is called by OSPM to inquire which functions are
     * supported.
589      */
590     if (!in->function) {
591         nvdimm_dsm_function0(0 /* No function supported other than
592                                   function 0 */, dsm_mem_addr);
593         return;
594     }
595 
596     /* No function except function 0 is supported yet. */
597     nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
598 }
599 
600 /*
 * The max transfer size is the maximum size that can be transferred by
 * both the 'Get Namespace Label Data' function and the
 * 'Set Namespace Label Data' function.
604  */
605 static uint32_t nvdimm_get_max_xfer_label_size(void)
606 {
607     uint32_t max_get_size, max_set_size, dsm_memory_size = 4096;
608 
609     /*
     * the max data ACPI can read at one time, which is transferred in
     * the response of the 'Get Namespace Label Data' function.
612      */
613     max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);
614 
615     /*
     * the max data ACPI can write at one time, which is transferred by
     * the 'Set Namespace Label Data' function.
618      */
619     max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
620                    sizeof(NvdimmFuncSetLabelDataIn);
621 
622     return MIN(max_get_size, max_set_size);
623 }
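/*
 * With the structures defined above this works out to
 * min(4096 - 8, 4096 - 12 - 8) = 4076 bytes per label data transfer.
 */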
624 
625 /*
626  * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
627  *
628  * It gets the size of Namespace Label data area and the max data size
629  * that Get/Set Namespace Label Data functions can transfer.
630  */
631 static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
632 {
633     NvdimmFuncGetLabelSizeOut label_size_out = {
634         .len = cpu_to_le32(sizeof(label_size_out)),
635     };
636     uint32_t label_size, mxfer;
637 
638     label_size = nvdimm->label_size;
639     mxfer = nvdimm_get_max_xfer_label_size();
640 
641     nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);
642 
643     label_size_out.func_ret_status = cpu_to_le32(0 /* Success */);
644     label_size_out.label_size = cpu_to_le32(label_size);
645     label_size_out.max_xfer = cpu_to_le32(mxfer);
646 
647     cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
648                               sizeof(label_size_out));
649 }
650 
651 static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
652                                            uint32_t offset, uint32_t length)
653 {
654     uint32_t ret = 3 /* Invalid Input Parameters */;
655 
656     if (offset + length < offset) {
        nvdimm_debug("offset %#x + length %#x overflows.\n", offset,
658                      length);
659         return ret;
660     }
661 
662     if (nvdimm->label_size < offset + length) {
663         nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n",
664                      offset + length, nvdimm->label_size);
665         return ret;
666     }
667 
668     if (length > nvdimm_get_max_xfer_label_size()) {
669         nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n",
670                      length, nvdimm_get_max_xfer_label_size());
671         return ret;
672     }
673 
674     return 0 /* Success */;
675 }
676 
677 /*
678  * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
679  */
680 static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
681                                       hwaddr dsm_mem_addr)
682 {
683     NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
684     NvdimmFuncGetLabelDataIn *get_label_data;
685     NvdimmFuncGetLabelDataOut *get_label_data_out;
686     uint32_t status;
687     int size;
688 
689     get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;
690     le32_to_cpus(&get_label_data->offset);
691     le32_to_cpus(&get_label_data->length);
692 
693     nvdimm_debug("Read Label Data: offset %#x length %#x.\n",
694                  get_label_data->offset, get_label_data->length);
695 
696     status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
697                                         get_label_data->length);
698     if (status != 0 /* Success */) {
699         nvdimm_dsm_no_payload(status, dsm_mem_addr);
700         return;
701     }
702 
703     size = sizeof(*get_label_data_out) + get_label_data->length;
704     assert(size <= 4096);
705     get_label_data_out = g_malloc(size);
706 
707     get_label_data_out->len = cpu_to_le32(size);
708     get_label_data_out->func_ret_status = cpu_to_le32(0 /* Success */);
709     nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
710                          get_label_data->length, get_label_data->offset);
711 
712     cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
713     g_free(get_label_data_out);
714 }
715 
716 /*
717  * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
718  */
719 static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
720                                       hwaddr dsm_mem_addr)
721 {
722     NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
723     NvdimmFuncSetLabelDataIn *set_label_data;
724     uint32_t status;
725 
726     set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;
727 
728     le32_to_cpus(&set_label_data->offset);
729     le32_to_cpus(&set_label_data->length);
730 
731     nvdimm_debug("Write Label Data: offset %#x length %#x.\n",
732                  set_label_data->offset, set_label_data->length);
733 
734     status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
735                                         set_label_data->length);
736     if (status != 0 /* Success */) {
737         nvdimm_dsm_no_payload(status, dsm_mem_addr);
738         return;
739     }
740 
741     assert(offsetof(NvdimmDsmIn, arg3) +
742            sizeof(*set_label_data) + set_label_data->length <= 4096);
743 
744     nvc->write_label_data(nvdimm, set_label_data->in_buf,
745                           set_label_data->length, set_label_data->offset);
746     nvdimm_dsm_no_payload(0 /* Success */, dsm_mem_addr);
747 }
748 
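/*
 * Dispatch a _DSM call whose handle refers to an individual NVDIMM device;
 * only the label functions (4, 5 and 6) are implemented, and only when the
 * device has a label area.
 */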
749 static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
750 {
751     NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);
752 
753     /* See the comments in nvdimm_dsm_root(). */
754     if (!in->function) {
755         uint32_t supported_func = 0;
756 
757         if (nvdimm && nvdimm->label_size) {
758             supported_func |= 0x1 /* Bit 0 indicates whether there is
759                                      support for any functions other
760                                      than function 0. */ |
761                               1 << 4 /* Get Namespace Label Size */ |
762                               1 << 5 /* Get Namespace Label Data */ |
763                               1 << 6 /* Set Namespace Label Data */;
764         }
765         nvdimm_dsm_function0(supported_func, dsm_mem_addr);
766         return;
767     }
768 
769     if (!nvdimm) {
770         nvdimm_dsm_no_payload(2 /* Non-Existing Memory Device */,
771                               dsm_mem_addr);
772         return;
773     }
774 
775     /* Encode DSM function according to DSM Spec Rev1. */
776     switch (in->function) {
777     case 4 /* Get Namespace Label Size */:
778         if (nvdimm->label_size) {
779             nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
780             return;
781         }
782         break;
783     case 5 /* Get Namespace Label Data */:
784         if (nvdimm->label_size) {
785             nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
786             return;
787         }
788         break;
789     case 0x6 /* Set Namespace Label Data */:
790         if (nvdimm->label_size) {
791             nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
792             return;
793         }
794         break;
795     }
796 
797     nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
798 }
799 
800 static uint64_t
801 nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
802 {
803     nvdimm_debug("BUG: we never read _DSM IO Port.\n");
804     return 0;
805 }
806 
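/*
 * The guest notifies QEMU of a _DSM call by writing the guest physical
 * address of the DSM buffer to this IO port; QEMU then reads the input from
 * that buffer and dispatches by handle (QEMU-reserved root, root device or
 * individual NVDIMM device).
 */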
807 static void
808 nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
809 {
810     AcpiNVDIMMState *state = opaque;
811     NvdimmDsmIn *in;
812     hwaddr dsm_mem_addr = val;
813 
814     nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr);
815 
816     /*
     * The DSM memory is mapped into the guest address space, so a malicious
     * guest can change its content while we are doing DSM emulation. Avoid
     * this by copying the DSM memory into QEMU local memory.
820      */
821     in = g_new(NvdimmDsmIn, 1);
822     cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in));
823 
824     le32_to_cpus(&in->revision);
825     le32_to_cpus(&in->function);
826     le32_to_cpus(&in->handle);
827 
    nvdimm_debug("Revision %#x Handle %#x Function %#x.\n", in->revision,
829                  in->handle, in->function);
830 
831     if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
        nvdimm_debug("Revision %#x is not supported, expected %#x.\n",
833                      in->revision, 0x1);
834         nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
835         goto exit;
836     }
837 
838     if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
839         nvdimm_dsm_reserved_root(state, in, dsm_mem_addr);
840         goto exit;
841     }
842 
843      /* Handle 0 is reserved for NVDIMM Root Device. */
844     if (!in->handle) {
845         nvdimm_dsm_root(in, dsm_mem_addr);
846         goto exit;
847     }
848 
849     nvdimm_dsm_device(in, dsm_mem_addr);
850 
851 exit:
852     g_free(in);
853 }
854 
855 static const MemoryRegionOps nvdimm_dsm_ops = {
856     .read = nvdimm_dsm_read,
857     .write = nvdimm_dsm_write,
858     .endianness = DEVICE_LITTLE_ENDIAN,
859     .valid = {
860         .min_access_size = 4,
861         .max_access_size = 4,
862     },
863 };
864 
865 void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
866 {
867     if (dev->hotplugged) {
868         acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS);
869     }
870 }
871 
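/*
 * Register the DSM IO port region, expose the DSM buffer to firmware via
 * the NVDIMM_DSM_MEM_FILE fw_cfg file and initialize the FIT buffer.
 */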
872 void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
873                             FWCfgState *fw_cfg, Object *owner)
874 {
875     memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
876                           "nvdimm-acpi-io", NVDIMM_ACPI_IO_LEN);
877     memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr);
878 
879     state->dsm_mem = g_array_new(false, true /* clear */, 1);
880     acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
881     fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
882                     state->dsm_mem->len);
883 
884     nvdimm_init_fit_buffer(&state->fit_buf);
885 }
886 
887 #define NVDIMM_COMMON_DSM       "NCAL"
888 #define NVDIMM_ACPI_MEM_ADDR    "MEMA"
889 
890 #define NVDIMM_DSM_MEMORY       "NRAM"
891 #define NVDIMM_DSM_IOPORT       "NPIO"
892 
893 #define NVDIMM_DSM_NOTIFY       "NTFI"
894 #define NVDIMM_DSM_HANDLE       "HDLE"
895 #define NVDIMM_DSM_REVISION     "REVS"
896 #define NVDIMM_DSM_FUNCTION     "FUNC"
897 #define NVDIMM_DSM_ARG3         "FARG"
898 
899 #define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
900 #define NVDIMM_DSM_OUT_BUF      "ODAT"
901 
902 #define NVDIMM_DSM_RFIT_STATUS  "RSTA"
903 
904 #define NVDIMM_QEMU_RSVD_UUID   "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
905 
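/*
 * Build the common NCAL method that every _DSM forwards to: it validates
 * the UUID, copies the _DSM arguments into the DSM memory, notifies QEMU
 * via the IO port and returns the buffer that QEMU filled in.
 */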
906 static void nvdimm_build_common_dsm(Aml *dev)
907 {
908     Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
909     Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
910     Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
911     uint8_t byte_list[1];
912 
913     method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
914     uuid = aml_arg(0);
915     function = aml_arg(2);
916     handle = aml_arg(4);
917     dsm_mem = aml_local(6);
918     dsm_out_buf = aml_local(7);
919 
920     aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));
921 
922     /* map DSM memory and IO into ACPI namespace. */
923     aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, AML_SYSTEM_IO,
924                aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
925     aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
926                AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));
927 
928     /*
929      * DSM notifier:
930      * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
931      *                    emulate the access.
932      *
     * It is an IO port, so accessing it causes a VM exit and control is
     * transferred to QEMU.
935      */
936     field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
937                       AML_PRESERVE);
938     aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
939                sizeof(uint32_t) * BITS_PER_BYTE));
940     aml_append(method, field);
941 
942     /*
943      * DSM input:
944      * NVDIMM_DSM_HANDLE: store device's handle, it's zero if the _DSM call
945      *                    happens on NVDIMM Root Device.
946      * NVDIMM_DSM_REVISION: store the Arg1 of _DSM call.
947      * NVDIMM_DSM_FUNCTION: store the Arg2 of _DSM call.
948      * NVDIMM_DSM_ARG3: store the Arg3 of _DSM call which is a Package
949      *                  containing function-specific arguments.
950      *
     * They are mapped as RAM on the host so that these accesses never
     * cause a VM exit.
953      */
954     field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
955                       AML_PRESERVE);
956     aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
957                sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
958     aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
959                sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
960     aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
961                sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
962     aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
963          (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
964     aml_append(method, field);
965 
966     /*
967      * DSM output:
968      * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
969      * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
970      *
     * Since the page is reused for both input and output, the input data
     * will be lost once the new result is stored into ODAT, so we should
     * fetch all the input data before writing the result.
974      */
975     field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
976                       AML_PRESERVE);
977     aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
978                sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
979     aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
980        (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
981     aml_append(method, field);
982 
983     /*
     * do not support any function if the DSM memory address has not been
     * patched.
986      */
987     unpatched = aml_equal(dsm_mem, aml_int(0x0));
988 
989     expected_uuid = aml_local(0);
990 
991     ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
992     aml_append(ifctx, aml_store(
993                aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
994                /* UUID for NVDIMM Root Device */, expected_uuid));
995     aml_append(method, ifctx);
996     elsectx = aml_else();
997     ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
998     aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
999                /* UUID for QEMU internal use */), expected_uuid));
1000     aml_append(elsectx, ifctx);
1001     elsectx2 = aml_else();
1002     aml_append(elsectx2, aml_store(
1003                aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
1004                /* UUID for NVDIMM Devices */, expected_uuid));
1005     aml_append(elsectx, elsectx2);
1006     aml_append(method, elsectx);
1007 
1008     uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
1009 
1010     unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));
1011 
1012     /*
     * function 0 is called by OSPM to inquire what functions are
     * supported.
1015      */
1016     ifctx = aml_if(aml_equal(function, aml_int(0)));
1017     byte_list[0] = 0 /* No function Supported */;
1018     aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
1019     aml_append(unsupport, ifctx);
1020 
1021     /* No function is supported yet. */
1022     byte_list[0] = 1 /* Not Supported */;
1023     aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
1024     aml_append(method, unsupport);
1025 
1026     /*
     * HDLE indicates which device the DSM function is issued from; 0 is
     * reserved for the root device, otherwise it is the NVDIMM device handle.
1029      * See the comments in nvdimm_slot_to_handle().
1030      */
1031     aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
1032     aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
1033     aml_append(method, aml_store(aml_arg(2), aml_name(NVDIMM_DSM_FUNCTION)));
1034 
1035     /*
1036      * The fourth parameter (Arg3) of _DSM is a package which contains
1037      * a buffer, the layout of the buffer is specified by UUID (Arg0),
1038      * Revision ID (Arg1) and Function Index (Arg2) which are documented
1039      * in the DSM Spec.
1040      */
1041     pckg = aml_arg(3);
1042     ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
1043                    aml_int(4 /* Package */)) /* It is a Package? */,
1044                    aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
1045                    NULL));
1046 
1047     pckg_index = aml_local(2);
1048     pckg_buf = aml_local(3);
1049     aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
1050     aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
1051     aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
1052     aml_append(method, ifctx);
1053 
1054     /*
     * tell QEMU the real address of the DSM memory; QEMU then takes
     * control and fills the result into the DSM memory.
1057      */
1058     aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));
1059 
1060     dsm_out_buf_size = aml_local(1);
1061     /* RLEN is not included in the payload returned to guest. */
1062     aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
1063                aml_int(4), dsm_out_buf_size));
1064     aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
1065                                  dsm_out_buf_size));
1066     aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
1067                aml_int(0), dsm_out_buf_size, "OBUF"));
1068     aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
1069                                        dsm_out_buf));
1070     aml_append(method, aml_return(dsm_out_buf));
1071     aml_append(dev, method);
1072 }
1073 
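/*
 * Build a per-device _DSM method that simply forwards its four arguments,
 * plus the device handle, to the common NCAL method.
 */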
1074 static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
1075 {
1076     Aml *method;
1077 
1078     method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
1079     aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
1080                                   aml_arg(1), aml_arg(2), aml_arg(3),
1081                                   aml_int(handle))));
1082     aml_append(dev, method);
1083 }
1084 
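/*
 * Build the RFIT helper and the _FIT method. _FIT calls RFIT repeatedly to
 * fetch the FIT from QEMU in chunks, restarting from offset 0 whenever QEMU
 * reports that the FIT changed during the read.
 */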
1085 static void nvdimm_build_fit(Aml *dev)
1086 {
1087     Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
1088     Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;
1089 
1090     buf = aml_local(0);
1091     buf_size = aml_local(1);
1092     fit = aml_local(2);
1093 
1094     aml_append(dev, aml_create_dword_field(aml_buffer(4, NULL),
1095                aml_int(0), NVDIMM_DSM_RFIT_STATUS));
1096 
1097     /* build helper function, RFIT. */
1098     method = aml_method("RFIT", 1, AML_SERIALIZED);
1099     aml_append(method, aml_create_dword_field(aml_buffer(4, NULL),
1100                                               aml_int(0), "OFST"));
1101 
1102     /* prepare input package. */
1103     pkg = aml_package(1);
1104     aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
1105     aml_append(pkg, aml_name("OFST"));
1106 
1107     /* call Read_FIT function. */
1108     call_result = aml_call5(NVDIMM_COMMON_DSM,
1109                             aml_touuid(NVDIMM_QEMU_RSVD_UUID),
1110                             aml_int(1) /* Revision 1 */,
1111                             aml_int(0x1) /* Read FIT */,
1112                             pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
1113     aml_append(method, aml_store(call_result, buf));
1114 
1115     /* handle _DSM result. */
1116     aml_append(method, aml_create_dword_field(buf,
1117                aml_int(0) /* offset at byte 0 */, "STAU"));
1118 
1119     aml_append(method, aml_store(aml_name("STAU"),
1120                                  aml_name(NVDIMM_DSM_RFIT_STATUS)));
1121 
    /* if something went wrong during _DSM. */
1123     ifcond = aml_equal(aml_int(0 /* Success */), aml_name("STAU"));
1124     ifctx = aml_if(aml_lnot(ifcond));
1125     aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
1126     aml_append(method, ifctx);
1127 
1128     aml_append(method, aml_store(aml_sizeof(buf), buf_size));
1129     aml_append(method, aml_subtract(buf_size,
1130                                     aml_int(4) /* the size of "STAU" */,
1131                                     buf_size));
1132 
    /* if we have read to the end of the FIT. */
1134     ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
1135     aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
1136     aml_append(method, ifctx);
1137 
1138     aml_append(method, aml_store(aml_shiftleft(buf_size, aml_int(3)),
1139                                  buf_size));
1140     aml_append(method, aml_create_field(buf,
1141                             aml_int(4 * BITS_PER_BYTE), /* offset at byte 4.*/
1142                             buf_size, "BUFF"));
1143     aml_append(method, aml_return(aml_name("BUFF")));
1144     aml_append(dev, method);
1145 
1146     /* build _FIT. */
1147     method = aml_method("_FIT", 0, AML_SERIALIZED);
1148     offset = aml_local(3);
1149 
1150     aml_append(method, aml_store(aml_buffer(0, NULL), fit));
1151     aml_append(method, aml_store(aml_int(0), offset));
1152 
1153     whilectx = aml_while(aml_int(1));
1154     aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
1155     aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));
1156 
1157     /*
     * if the FIT buffer was changed during RFIT, read from the beginning
1159      * again.
1160      */
1161     ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
1162                              aml_int(0x100 /* fit changed */)));
1163     aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
1164     aml_append(ifctx, aml_store(aml_int(0), offset));
1165     aml_append(whilectx, ifctx);
1166 
1167     elsectx = aml_else();
1168 
1169     /* finish fit read if no data is read out. */
1170     ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
1171     aml_append(ifctx, aml_return(fit));
1172     aml_append(elsectx, ifctx);
1173 
1174     /* update the offset. */
1175     aml_append(elsectx, aml_add(offset, buf_size, offset));
1176     /* append the data we read out to the fit buffer. */
1177     aml_append(elsectx, aml_concatenate(fit, buf, fit));
1178     aml_append(whilectx, elsectx);
1179     aml_append(method, whilectx);
1180 
1181     aml_append(dev, method);
1182 }
1183 
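/*
 * Create one ACPI namespace device (NVxx) per possible memory slot under
 * the root NVDIMM device, each with an _ADR returning its NFIT handle and
 * a _DSM that forwards to NCAL.
 */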
1184 static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
1185 {
1186     uint32_t slot;
1187 
1188     for (slot = 0; slot < ram_slots; slot++) {
1189         uint32_t handle = nvdimm_slot_to_handle(slot);
1190         Aml *nvdimm_dev;
1191 
1192         nvdimm_dev = aml_device("NV%02X", slot);
1193 
1194         /*
1195          * ACPI 6.0: 9.20 NVDIMM Devices:
1196          *
1197          * _ADR object that is used to supply OSPM with unique address
1198          * of the NVDIMM device. This is done by returning the NFIT Device
1199          * handle that is used to identify the associated entries in ACPI
1200          * table NFIT or _FIT.
1201          */
1202         aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));
1203 
1204         nvdimm_build_device_dsm(nvdimm_dev, handle);
1205         aml_append(root_dev, nvdimm_dev);
1206     }
1207 }
1208 
1209 static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
                              BIOSLinker *linker, GArray *dsm_dma_area,
1211                               uint32_t ram_slots)
1212 {
1213     Aml *ssdt, *sb_scope, *dev;
1214     int mem_addr_offset, nvdimm_ssdt;
1215 
1216     acpi_add_table(table_offsets, table_data);
1217 
1218     ssdt = init_aml_allocator();
1219     acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));
1220 
1221     sb_scope = aml_scope("\\_SB");
1222 
1223     dev = aml_device("NVDR");
1224 
1225     /*
1226      * ACPI 6.0: 9.20 NVDIMM Devices:
1227      *
1228      * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
1229      * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMM support is exposed by the
     * platform to OSPM.
1232      * For each NVDIMM present or intended to be supported by platform,
1233      * platform firmware also exposes an ACPI Namespace Device under the
1234      * root device.
1235      */
1236     aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
1237 
1238     nvdimm_build_common_dsm(dev);
1239 
1240     /* 0 is reserved for root device. */
1241     nvdimm_build_device_dsm(dev, 0);
1242     nvdimm_build_fit(dev);
1243 
1244     nvdimm_build_nvdimm_devices(dev, ram_slots);
1245 
1246     aml_append(sb_scope, dev);
1247     aml_append(ssdt, sb_scope);
1248 
1249     nvdimm_ssdt = table_data->len;
1250 
1251     /* copy AML table into ACPI tables blob and patch header there */
1252     g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
1253     mem_addr_offset = build_append_named_dword(table_data,
1254                                                NVDIMM_ACPI_MEM_ADDR);
1255 
1256     bios_linker_loader_alloc(linker,
                             NVDIMM_DSM_MEM_FILE, dsm_dma_area,
1258                              sizeof(NvdimmDsmIn), false /* high memory */);
1259     bios_linker_loader_add_pointer(linker,
1260         ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t),
1261         NVDIMM_DSM_MEM_FILE, 0);
1262     build_header(linker, table_data,
1263         (void *)(table_data->data + nvdimm_ssdt),
1264         "SSDT", table_data->len - nvdimm_ssdt, 1, NULL, "NVDIMM");
1265     free_aml_allocator();
1266 }
1267 
1268 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
1269                        BIOSLinker *linker, AcpiNVDIMMState *state,
1270                        uint32_t ram_slots)
1271 {
1272     GSList *device_list;
1273 
1274     /* no nvdimm device can be plugged. */
1275     if (!ram_slots) {
1276         return;
1277     }
1278 
1279     nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
1280                       ram_slots);
1281 
1282     device_list = nvdimm_get_plugged_device_list();
1283     /* no NVDIMM device is plugged. */
1284     if (!device_list) {
1285         return;
1286     }
1287 
1288     nvdimm_build_nfit(state, table_offsets, table_data, linker);
1289     g_slist_free(device_list);
1290 }
1291