/*
 * NVDIMM ACPI Implementation
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *  Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 * and the DSM specification can be found at:
 *       http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
 *
 * Currently, it only supports PMEM Virtualization.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/osdep.h"
#include "qemu/uuid.h"
#include "qapi/error.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"
#include "qemu/nvdimm-utils.h"
#include "trace.h"

/*
 * define Byte Addressable Persistent Memory (PM) Region according to
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
 */
static const uint8_t nvdimm_nfit_spa_uuid[] =
      UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
              0x18, 0xb7, 0x8c, 0xdb);

/*
 * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
 * Interface Table (NFIT).
 */

/*
 * System Physical Address Range Structure
 *
 * It describes the system physical address ranges occupied by NVDIMMs and
 * the types of the regions.
 */
struct NvdimmNfitSpa {
    uint16_t type;
    uint16_t length;
    uint16_t spa_index;
    uint16_t flags;
    uint32_t reserved;
    uint32_t proximity_domain;
    uint8_t type_guid[16];
    uint64_t spa_base;
    uint64_t spa_length;
    uint64_t mem_attr;
} QEMU_PACKED;
typedef struct NvdimmNfitSpa NvdimmNfitSpa;

/*
 * Memory Device to System Physical Address Range Mapping Structure
 *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * describing the memory interleave.
 */
struct NvdimmNfitMemDev {
    uint16_t type;
    uint16_t length;
    uint32_t nfit_handle;
    uint16_t phys_id;
    uint16_t region_id;
    uint16_t spa_index;
    uint16_t dcr_index;
    uint64_t region_len;
    uint64_t region_offset;
    uint64_t region_dpa;
    uint16_t interleave_index;
    uint16_t interleave_ways;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;

#define ACPI_NFIT_MEM_NOT_ARMED     (1 << 3)

/*
 * NVDIMM Control Region Structure
 *
 * It describes the NVDIMM and, if applicable, the Block Control Window.
 */
struct NvdimmNfitControlRegion {
    uint16_t type;
    uint16_t length;
    uint16_t dcr_index;
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t revision_id;
    uint16_t sub_vendor_id;
    uint16_t sub_device_id;
    uint16_t sub_revision_id;
    uint8_t reserved[6];
    uint32_t serial_number;
    uint16_t fic;
    uint16_t num_bcw;
    uint64_t bcw_size;
    uint64_t cmd_offset;
    uint64_t cmd_size;
    uint64_t status_offset;
    uint64_t status_size;
    uint16_t flags;
    uint8_t reserved2[6];
} QEMU_PACKED;
typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;

/*
 * NVDIMM Platform Capabilities Structure
 *
 * Defined in section 5.2.25.9 of ACPI 6.2 Errata A, September 2017
 */
struct NvdimmNfitPlatformCaps {
    uint16_t type;
    uint16_t length;
    uint8_t highest_cap;
    uint8_t reserved[3];
    uint32_t capabilities;
    uint8_t reserved2[4];
} QEMU_PACKED;
typedef struct NvdimmNfitPlatformCaps NvdimmNfitPlatformCaps;

/*
 * The module serial number is a unique number for each device. We use the
 * slot id of the NVDIMM device to generate this number so that each device
 * is associated with a different number.
 *
 * 0x123456 is a magic number we arbitrarily chose.
 */
static uint32_t nvdimm_slot_to_sn(int slot)
{
    return 0x123456 + slot;
}

/*
 * The handle uniquely associates the nfit_memdev structure with an NVDIMM
 * ACPI device - nfit_memdev.nfit_handle matches the value returned by the
 * ACPI device's _ADR method.
 *
 * We generate the handle from the slot id of the NVDIMM device and reserve
 * 0 for the NVDIMM root device.
 */
static uint32_t nvdimm_slot_to_handle(int slot)
{
    return slot + 1;
}

/*
 * The index uniquely identifies the structure; 0 is reserved and indicates
 * that the structure is not valid or the associated structure is not
 * present.
 *
 * Each NVDIMM device needs two indexes, one for nfit_spa and another for
 * nfit_dcr, which are generated from the slot id of the NVDIMM device.
 */
static uint16_t nvdimm_slot_to_spa_index(int slot)
{
    return (slot + 1) << 1;
}

/* See the comments of nvdimm_slot_to_spa_index(). */
static uint32_t nvdimm_slot_to_dcr_index(int slot)
{
    return nvdimm_slot_to_spa_index(slot) + 1;
}
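
/*
 * Worked example (illustration only, derived from the helpers above):
 * slot 0 maps to handle 1, spa_index 2 and dcr_index 3; slot 1 maps to
 * handle 2, spa_index 4 and dcr_index 5, so no two devices ever share a
 * handle or an index, and 0 stays reserved.
 */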

static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
{
    NVDIMMDevice *nvdimm = NULL;
    GSList *list, *device_list = nvdimm_get_device_list();

    for (list = device_list; list; list = list->next) {
        NVDIMMDevice *nvd = list->data;
        int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
                                           NULL);

        if (nvdimm_slot_to_handle(slot) == handle) {
            nvdimm = nvd;
            break;
        }
    }

    g_slist_free(device_list);
    return nvdimm;
}

/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_uint(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                             NULL);
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    uint32_t node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * The control region is strictly tied to the slot: all the device
     * info, such as the serial number and index, is derived from the
     * slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* The region is reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));

    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* It is PMEM and can be cached as write-back. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}

/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 */
static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    NVDIMMDevice *nvdimm = NVDIMM(OBJECT(dev));
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure */);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * Associate the memory device with the System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* Associate the memory device with the Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    /* The device address starts from 0. */
    nfit_memdev->region_dpa = cpu_to_le64(0);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);

    if (nvdimm->unarmed) {
        nfit_memdev->flags |= cpu_to_le16(ACPI_NFIT_MEM_NOT_ARMED);
    }
}

/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method follows Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x301 /* Format Interface Code:
                                         Byte addressable, no energy backed.
                                         See ACPI 6.2, sect 5.2.25.6 and
                                         JEDEC Annex L Release 3. */);
}

/*
 * ACPI 6.2 Errata A: 5.2.25.9 NVDIMM Platform Capabilities Structure
 */
static void
nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities)
{
    NvdimmNfitPlatformCaps *nfit_caps;

    nfit_caps = acpi_data_push(structures, sizeof(*nfit_caps));

    nfit_caps->type = cpu_to_le16(7 /* NVDIMM Platform Capabilities */);
    nfit_caps->length = cpu_to_le16(sizeof(*nfit_caps));
    nfit_caps->highest_cap = 31 - clz32(capabilities);
    nfit_caps->capabilities = cpu_to_le32(capabilities);
}

static GArray *nvdimm_build_device_structure(NVDIMMState *state)
{
    GSList *device_list, *list = nvdimm_get_device_list();
    GArray *structures = g_array_new(false, true /* clear */, 1);

    for (device_list = list; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;

        /* build System Physical Address Range Structure. */
        nvdimm_build_structure_spa(structures, dev);

        /*
         * build Memory Device to System Physical Address Range Mapping
         * Structure.
         */
        nvdimm_build_structure_memdev(structures, dev);

        /* build NVDIMM Control Region Structure. */
        nvdimm_build_structure_dcr(structures, dev);
    }
    g_slist_free(list);

    if (state->persistence) {
        nvdimm_build_structure_caps(structures, state->persistence);
    }

    return structures;
}

static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
{
    fit_buf->fit = g_array_new(false, true /* clear */, 1);
}

static void nvdimm_build_fit_buffer(NVDIMMState *state)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;

    g_array_free(fit_buf->fit, true);
    fit_buf->fit = nvdimm_build_device_structure(state);
    fit_buf->dirty = true;
}

void nvdimm_plug(NVDIMMState *state)
{
    nvdimm_build_fit_buffer(state);
}

/*
 * NVDIMM Firmware Interface Table
 * @signature: "NFIT"
 *
 * It provides information that allows OSPM to enumerate the NVDIMMs present
 * in the platform and associate the system physical address ranges created
 * by the NVDIMMs.
 *
 * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 */

static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets,
                              GArray *table_data, BIOSLinker *linker,
                              const char *oem_id, const char *oem_table_id)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    AcpiTable table = { .sig = "NFIT", .rev = 1,
                        .oem_id = oem_id, .oem_table_id = oem_table_id };

    acpi_add_table(table_offsets, table_data);

    acpi_table_begin(&table, table_data);
    /* Reserved */
    build_append_int_noprefix(table_data, 0, 4);
    /* NVDIMM device structures. */
    g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
    acpi_table_end(linker, &table);
}

#define NVDIMM_DSM_MEMORY_SIZE    4096

struct NvdimmDsmIn {
    uint32_t handle;
    uint32_t revision;
    uint32_t function;
    /* the remaining size in the page is used by arg3. */
    union {
        uint8_t arg3[4084];
    };
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmDsmOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint8_t data[4092];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmDsmFunc0Out {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t supported_func;
} QEMU_PACKED;
typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;

struct NvdimmDsmFuncNoPayloadOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status;
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;

struct NvdimmFuncGetLabelSizeOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint32_t label_size;      /* the size of label data area. */
    /*
     * Maximum size of the namespace label data length supported by
     * the platform in Get/Set Namespace Label Data functions.
     */
    uint32_t max_xfer;
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncGetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be read via the function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncGetLabelDataOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t out_buf[];        /* the data read via the Get Namespace Label
                                 Data function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncSetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be written via the function. */
    uint8_t in_buf[]; /* the data written to the label data area. */
} QEMU_PACKED;
typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncReadFITIn {
    uint32_t offset; /* the offset into FIT buffer. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncReadFITOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t fit[];            /* the FIT data. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);

static void
nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
{
    NvdimmDsmFunc0Out func0 = {
        .len = cpu_to_le32(sizeof(func0)),
        .supported_func = cpu_to_le32(supported_func),
    };
    cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
}

static void
nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
{
    NvdimmDsmFuncNoPayloadOut out = {
        .len = cpu_to_le32(sizeof(out)),
        .func_ret_status = cpu_to_le32(func_ret_status),
    };
    cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}

#define NVDIMM_DSM_RET_STATUS_SUCCESS        0      /* Success */
#define NVDIMM_DSM_RET_STATUS_UNSUPPORT      1      /* Not Supported */
#define NVDIMM_DSM_RET_STATUS_NOMEMDEV       2      /* Non-Existing Memory Device */
#define NVDIMM_DSM_RET_STATUS_INVALID        3      /* Invalid Input Parameters */
#define NVDIMM_DSM_RET_STATUS_FIT_CHANGED    0x100  /* FIT Changed */

#define NVDIMM_QEMU_RSVD_HANDLE_ROOT         0x10000

/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in,
                                     hwaddr dsm_mem_addr)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    NvdimmFuncReadFITIn *read_fit;
    NvdimmFuncReadFITOut *read_fit_out;
    GArray *fit;
    uint32_t read_len = 0, func_ret_status;
    int size;

    read_fit = (NvdimmFuncReadFITIn *)in->arg3;
    read_fit->offset = le32_to_cpu(read_fit->offset);

    fit = fit_buf->fit;

    trace_acpi_nvdimm_read_fit(read_fit->offset, fit->len,
                               fit_buf->dirty ? "Yes" : "No");

    if (read_fit->offset > fit->len) {
        func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID;
        goto exit;
    }

    /* This is the first time FIT is read. */
    if (!read_fit->offset) {
        fit_buf->dirty = false;
    } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
        func_ret_status = NVDIMM_DSM_RET_STATUS_FIT_CHANGED;
        goto exit;
    }

    func_ret_status = NVDIMM_DSM_RET_STATUS_SUCCESS;
    read_len = MIN(fit->len - read_fit->offset,
                   NVDIMM_DSM_MEMORY_SIZE - sizeof(NvdimmFuncReadFITOut));

exit:
    size = sizeof(NvdimmFuncReadFITOut) + read_len;
    read_fit_out = g_malloc(size);

    read_fit_out->len = cpu_to_le32(size);
    read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
    memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);

    cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);

    g_free(read_fit_out);
}

static void
nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state,
                                       NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    switch (in->function) {
    case 0x0:
        nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
        return;
    case 0x1 /* Read FIT */:
        nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
        return;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    /*
     * function 0 is called by OSPM to inquire which functions are
     * supported.
     */
    if (!in->function) {
        nvdimm_dsm_function0(0 /* No function supported other than
                                  function 0 */, dsm_mem_addr);
        return;
    }

    /* No function except function 0 is supported yet. */
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

/*
 * The max transfer size is the max size that can be transferred by both
 * the 'Get Namespace Label Data' function and the 'Set Namespace Label
 * Data' function.
 */
static uint32_t nvdimm_get_max_xfer_label_size(void)
{
    uint32_t max_get_size, max_set_size, dsm_memory_size;

    dsm_memory_size = NVDIMM_DSM_MEMORY_SIZE;

    /*
     * the max data ACPI can read at one time, which is transferred by
     * the response of the 'Get Namespace Label Data' function.
     */
    max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);

    /*
     * the max data ACPI can write at one time, which is transferred by
     * the 'Set Namespace Label Data' function.
     */
    max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
                   sizeof(NvdimmFuncSetLabelDataIn);

    return MIN(max_get_size, max_set_size);
}
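
/*
 * Worked example (illustration only), using the packed structure sizes
 * defined above: the Get path leaves 4096 - 8 = 4088 bytes for label data,
 * while the Set path leaves 4096 - 12 - 8 = 4076 bytes, so the reported
 * max transfer size is 4076 bytes.
 */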

/*
 * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
 *
 * It gets the size of Namespace Label data area and the max data size
 * that Get/Set Namespace Label Data functions can transfer.
 */
static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
{
    NvdimmFuncGetLabelSizeOut label_size_out = {
        .len = cpu_to_le32(sizeof(label_size_out)),
    };
    uint32_t label_size, mxfer;

    label_size = nvdimm->label_size;
    mxfer = nvdimm_get_max_xfer_label_size();

    trace_acpi_nvdimm_label_info(label_size, mxfer);

    label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    label_size_out.label_size = cpu_to_le32(label_size);
    label_size_out.max_xfer = cpu_to_le32(mxfer);

    cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
                              sizeof(label_size_out));
}

static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
                                           uint32_t offset, uint32_t length)
{
    uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID;

    if (offset + length < offset) {
        trace_acpi_nvdimm_label_overflow(offset, length);
        return ret;
    }

    if (nvdimm->label_size < offset + length) {
        trace_acpi_nvdimm_label_oversize(offset + length, nvdimm->label_size);
        return ret;
    }

    if (length > nvdimm_get_max_xfer_label_size()) {
        trace_acpi_nvdimm_label_xfer_exceed(length,
                                            nvdimm_get_max_xfer_label_size());
        return ret;
    }

    return NVDIMM_DSM_RET_STATUS_SUCCESS;
}

/*
 * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
 */
static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncGetLabelDataIn *get_label_data;
    NvdimmFuncGetLabelDataOut *get_label_data_out;
    uint32_t status;
    int size;

    get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;
    get_label_data->offset = le32_to_cpu(get_label_data->offset);
    get_label_data->length = le32_to_cpu(get_label_data->length);

    trace_acpi_nvdimm_read_label(get_label_data->offset,
                                 get_label_data->length);

    status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
                                        get_label_data->length);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    size = sizeof(*get_label_data_out) + get_label_data->length;
    assert(size <= NVDIMM_DSM_MEMORY_SIZE);
    get_label_data_out = g_malloc(size);

    get_label_data_out->len = cpu_to_le32(size);
    get_label_data_out->func_ret_status =
        cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
                         get_label_data->length, get_label_data->offset);

    cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
    g_free(get_label_data_out);
}

/*
 * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
 */
static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncSetLabelDataIn *set_label_data;
    uint32_t status;

    set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;

    set_label_data->offset = le32_to_cpu(set_label_data->offset);
    set_label_data->length = le32_to_cpu(set_label_data->length);

    trace_acpi_nvdimm_write_label(set_label_data->offset,
                                  set_label_data->length);

    status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
                                        set_label_data->length);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) +
           set_label_data->length <= NVDIMM_DSM_MEMORY_SIZE);

    nvc->write_label_data(nvdimm, set_label_data->in_buf,
                          set_label_data->length, set_label_data->offset);
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_SUCCESS, dsm_mem_addr);
}

static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);

    /* See the comments in nvdimm_dsm_root(). */
    if (!in->function) {
        uint32_t supported_func = 0;

        if (nvdimm && nvdimm->label_size) {
            supported_func |= 0x1 /* Bit 0 indicates whether there is
                                     support for any functions other
                                     than function 0. */ |
                              1 << 4 /* Get Namespace Label Size */ |
                              1 << 5 /* Get Namespace Label Data */ |
                              1 << 6 /* Set Namespace Label Data */;
        }
        nvdimm_dsm_function0(supported_func, dsm_mem_addr);
        return;
    }

    if (!nvdimm) {
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_NOMEMDEV,
                              dsm_mem_addr);
        return;
    }

    /* Encode DSM function according to DSM Spec Rev1. */
    switch (in->function) {
    case 4 /* Get Namespace Label Size */:
        if (nvdimm->label_size) {
            nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
            return;
        }
        break;
    case 5 /* Get Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    case 0x6 /* Set Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
    trace_acpi_nvdimm_read_io_port();
    return 0;
}

static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    NVDIMMState *state = opaque;
    NvdimmDsmIn *in;
    hwaddr dsm_mem_addr = val;

    trace_acpi_nvdimm_dsm_mem_addr(dsm_mem_addr);

    /*
     * The DSM memory is mapped to guest address space so an evil guest
     * can change its content while we are doing DSM emulation. Avoid
     * this by copying DSM memory to QEMU local memory.
     */
    in = g_new(NvdimmDsmIn, 1);
    cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in));

    in->revision = le32_to_cpu(in->revision);
    in->function = le32_to_cpu(in->function);
    in->handle = le32_to_cpu(in->handle);

    trace_acpi_nvdimm_dsm_info(in->revision, in->handle, in->function);

    if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
        trace_acpi_nvdimm_invalid_revision(in->revision);
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
        goto exit;
    }

    if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
        nvdimm_dsm_handle_reserved_root_method(state, in, dsm_mem_addr);
        goto exit;
    }

    /* Handle 0 is reserved for NVDIMM Root Device. */
    if (!in->handle) {
        nvdimm_dsm_root(in, dsm_mem_addr);
        goto exit;
    }

    nvdimm_dsm_device(in, dsm_mem_addr);

exit:
    g_free(in);
}

static const MemoryRegionOps nvdimm_dsm_ops = {
    .read = nvdimm_dsm_read,
    .write = nvdimm_dsm_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    if (dev->hotplugged) {
        acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS);
    }
}

void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
                            struct AcpiGenericAddress dsm_io,
                            FWCfgState *fw_cfg, Object *owner)
{
    state->dsm_io = dsm_io;
    memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
                          "nvdimm-acpi-io", dsm_io.bit_width >> 3);
    memory_region_add_subregion(io, dsm_io.address, &state->io_mr);

    state->dsm_mem = g_array_new(false, true /* clear */, 1);
    acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
    fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
                    state->dsm_mem->len);

    nvdimm_init_fit_buffer(&state->fit_buf);
}

#define NVDIMM_COMMON_DSM       "NCAL"
#define NVDIMM_ACPI_MEM_ADDR    "MEMA"

#define NVDIMM_DSM_MEMORY       "NRAM"
#define NVDIMM_DSM_IOPORT       "NPIO"

#define NVDIMM_DSM_NOTIFY       "NTFI"
#define NVDIMM_DSM_HANDLE       "HDLE"
#define NVDIMM_DSM_REVISION     "REVS"
#define NVDIMM_DSM_FUNCTION     "FUNC"
#define NVDIMM_DSM_ARG3         "FARG"

#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
#define NVDIMM_DSM_OUT_BUF      "ODAT"

#define NVDIMM_DSM_RFIT_STATUS  "RSTA"

#define NVDIMM_QEMU_RSVD_UUID   "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"

static void nvdimm_build_common_dsm(Aml *dev,
                                    NVDIMMState *nvdimm_state)
{
    Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
    Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
    Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
    Aml *whilectx, *offset;
    uint8_t byte_list[1];
    AmlRegionSpace rs;

    method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
    uuid = aml_arg(0);
    function = aml_arg(2);
    handle = aml_arg(4);
    dsm_mem = aml_local(6);
    dsm_out_buf = aml_local(7);

    aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));

    if (nvdimm_state->dsm_io.space_id == AML_AS_SYSTEM_IO) {
        rs = AML_SYSTEM_IO;
    } else {
        rs = AML_SYSTEM_MEMORY;
    }

    /* map DSM memory and IO into ACPI namespace. */
    aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, rs,
               aml_int(nvdimm_state->dsm_io.address),
               nvdimm_state->dsm_io.bit_width >> 3));
    aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
               AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));

    /*
     * DSM notifier:
     * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
     *                    emulate the access.
     *
     * It is an IO port, so accessing it causes a VM-exit and control is
     * transferred to QEMU.
     */
    field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
               nvdimm_state->dsm_io.bit_width));
    aml_append(method, field);

    /*
     * DSM input:
     * NVDIMM_DSM_HANDLE: store the device's handle; it's zero if the _DSM
     *                    call happens on the NVDIMM Root Device.
     * NVDIMM_DSM_REVISION: store the Arg1 of _DSM call.
     * NVDIMM_DSM_FUNCTION: store the Arg2 of _DSM call.
     * NVDIMM_DSM_ARG3: store the Arg3 of _DSM call which is a Package
     *                  containing function-specific arguments.
     *
     * They are RAM-backed on the host, so these accesses never cause a
     * VM-exit.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
               sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
               sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
               sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
               (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * DSM output:
     * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
     * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
     *
     * Since the page is reused by both input and output, the input data
     * will be lost after storing a new result into ODAT, so we should
     * fetch all the input data before writing the result.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
               sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
               (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * do not support any method if the DSM memory address has not been
     * patched.
     */
    unpatched = aml_equal(dsm_mem, aml_int(0x0));

    expected_uuid = aml_local(0);

    ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
    aml_append(ifctx, aml_store(
               aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
               /* UUID for NVDIMM Root Device */, expected_uuid));
    aml_append(method, ifctx);
    elsectx = aml_else();
    ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
    aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
               /* UUID for QEMU internal use */), expected_uuid));
    aml_append(elsectx, ifctx);
    elsectx2 = aml_else();
    aml_append(elsectx2, aml_store(
               aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
               /* UUID for NVDIMM Devices */, expected_uuid));
    aml_append(elsectx, elsectx2);
    aml_append(method, elsectx);

    uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));

    unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));

    /*
     * function 0 is called by OSPM to inquire what functions are
     * supported.
     */
    ifctx = aml_if(aml_equal(function, aml_int(0)));
    byte_list[0] = 0 /* No function Supported */;
    aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
    aml_append(unsupport, ifctx);

    /* No function is supported yet. */
    byte_list[0] = NVDIMM_DSM_RET_STATUS_UNSUPPORT;
    aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
    aml_append(method, unsupport);

    /*
     * HDLE indicates which device the DSM function is issued from:
     * 0 is reserved for the root device and otherwise it is the handle
     * of an NVDIMM device. See the comments in nvdimm_slot_to_handle().
     */
    aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
    aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
    aml_append(method, aml_store(function, aml_name(NVDIMM_DSM_FUNCTION)));

    /*
     * The fourth parameter (Arg3) of _DSM is a package which contains
     * a buffer, the layout of the buffer is specified by UUID (Arg0),
     * Revision ID (Arg1) and Function Index (Arg2) which are documented
     * in the DSM Spec.
     */
    pckg = aml_arg(3);
    ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
                   aml_int(4 /* Package */)) /* It is a Package? */,
                   aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
                   NULL));

    pckg_index = aml_local(2);
    pckg_buf = aml_local(3);
    aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
    aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
    aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
    aml_append(method, ifctx);

    /*
     * tell QEMU about the real address of DSM memory, then QEMU
     * gets the control and fills the result in DSM memory.
     */
    aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));

    dsm_out_buf_size = aml_local(1);
    /* RLEN is not included in the payload returned to guest. */
    aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
               aml_int(4), dsm_out_buf_size));

    /*
     * As per ACPI spec 6.3, Table 19-419 Object Conversion Rules, if
     * the Buffer Field is <= the size of an Integer (in bits), it will
     * be treated as an Integer. Moreover, the Integer size depends on
     * the DSDT table's revision number. If the revision number is < 2,
     * the Integer size is 32 bits, otherwise it is 64 bits.
     * Because of this, CreateField() cannot be used if RLEN < Integer Size.
     *
     * Also please note that the ACPI ASL operator SizeOf() doesn't support
     * Integer and there isn't any other way to figure out the Integer
     * size. Hence we assume 8 bytes as the Integer size and, if RLEN < 8
     * bytes, build dsm_out_buf byte by byte.
     */
    ifctx = aml_if(aml_lless(dsm_out_buf_size, aml_int(8)));
    offset = aml_local(2);
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(ifctx, aml_name_decl("TBUF", aml_buffer(1, NULL)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), dsm_out_buf));

    whilectx = aml_while(aml_lless(offset, dsm_out_buf_size));
    /* Copy 1 byte at offset from ODAT to the temporary buffer (TBUF). */
    aml_append(whilectx, aml_store(aml_derefof(aml_index(
                                   aml_name(NVDIMM_DSM_OUT_BUF), offset)),
                                   aml_index(aml_name("TBUF"), aml_int(0))));
    aml_append(whilectx, aml_concatenate(dsm_out_buf, aml_name("TBUF"),
                                         dsm_out_buf));
    aml_append(whilectx, aml_increment(offset));
    aml_append(ifctx, whilectx);

    aml_append(ifctx, aml_return(dsm_out_buf));
    aml_append(method, ifctx);

    /* If RLEN >= Integer size, just use CreateField() operator */
    aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
                                 dsm_out_buf_size));
    aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
                                        aml_int(0), dsm_out_buf_size, "OBUF"));
    aml_append(method, aml_return(aml_name("OBUF")));

    aml_append(dev, method);
}

static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
{
    Aml *method;

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
                                  aml_arg(1), aml_arg(2), aml_arg(3),
                                  aml_int(handle))));
    aml_append(dev, method);
}

static void nvdimm_build_fit(Aml *dev)
{
    Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
    Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;

    buf = aml_local(0);
    buf_size = aml_local(1);
    fit = aml_local(2);

    aml_append(dev, aml_name_decl(NVDIMM_DSM_RFIT_STATUS, aml_int(0)));

    /* build helper function, RFIT. */
    method = aml_method("RFIT", 1, AML_SERIALIZED);
    aml_append(method, aml_name_decl("OFST", aml_int(0)));

    /* prepare input package. */
    pkg = aml_package(1);
    aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
    aml_append(pkg, aml_name("OFST"));

    /* call Read_FIT function. */
    call_result = aml_call5(NVDIMM_COMMON_DSM,
                            aml_touuid(NVDIMM_QEMU_RSVD_UUID),
                            aml_int(1) /* Revision 1 */,
                            aml_int(0x1) /* Read FIT */,
                            pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
    aml_append(method, aml_store(call_result, buf));

    /* handle _DSM result. */
    aml_append(method, aml_create_dword_field(buf,
               aml_int(0) /* offset at byte 0 */, "STAU"));

    aml_append(method, aml_store(aml_name("STAU"),
                                 aml_name(NVDIMM_DSM_RFIT_STATUS)));

    /* if something is wrong during _DSM. */
    ifcond = aml_equal(aml_int(NVDIMM_DSM_RET_STATUS_SUCCESS),
                       aml_name("STAU"));
    ifctx = aml_if(aml_lnot(ifcond));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_store(aml_sizeof(buf), buf_size));
    aml_append(method, aml_subtract(buf_size,
                                    aml_int(4) /* the size of "STAU" */,
                                    buf_size));

    /* if we read the end of fit. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_create_field(buf,
                       aml_int(4 * BITS_PER_BYTE), /* offset at byte 4.*/
                       aml_shiftleft(buf_size, aml_int(3)), "BUFF"));
    aml_append(method, aml_return(aml_name("BUFF")));
    aml_append(dev, method);

    /* build _FIT. */
    method = aml_method("_FIT", 0, AML_SERIALIZED);
    offset = aml_local(3);

    aml_append(method, aml_store(aml_buffer(0, NULL), fit));
    aml_append(method, aml_store(aml_int(0), offset));

    whilectx = aml_while(aml_int(1));
    aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
    aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));

    /*
     * if fit buffer was changed during RFIT, read from the beginning
     * again.
     */
    ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
                             aml_int(NVDIMM_DSM_RET_STATUS_FIT_CHANGED)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(whilectx, ifctx);

    elsectx = aml_else();

    /* finish fit read if no data is read out. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(fit));
    aml_append(elsectx, ifctx);

    /* update the offset. */
    aml_append(elsectx, aml_add(offset, buf_size, offset));
    /* append the data we read out to the fit buffer. */
    aml_append(elsectx, aml_concatenate(fit, buf, fit));
    aml_append(whilectx, elsectx);
    aml_append(method, whilectx);

    aml_append(dev, method);
}

static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
{
    uint32_t slot;

    for (slot = 0; slot < ram_slots; slot++) {
        uint32_t handle = nvdimm_slot_to_handle(slot);
        Aml *nvdimm_dev;

        nvdimm_dev = aml_device("NV%02X", slot);

        /*
         * ACPI 6.0: 9.20 NVDIMM Devices:
         *
         * _ADR object that is used to supply OSPM with unique address
         * of the NVDIMM device. This is done by returning the NFIT Device
         * handle that is used to identify the associated entries in ACPI
         * table NFIT or _FIT.
         */
        aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));

        nvdimm_build_device_dsm(nvdimm_dev, handle);
        aml_append(root_dev, nvdimm_dev);
    }
}

static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
                              BIOSLinker *linker,
                              NVDIMMState *nvdimm_state,
                              uint32_t ram_slots, const char *oem_id)
{
    int mem_addr_offset;
    Aml *ssdt, *sb_scope, *dev;
    AcpiTable table = { .sig = "SSDT", .rev = 1,
                        .oem_id = oem_id, .oem_table_id = "NVDIMM" };

    acpi_add_table(table_offsets, table_data);

    acpi_table_begin(&table, table_data);
    ssdt = init_aml_allocator();
    sb_scope = aml_scope("\\_SB");

    dev = aml_device("NVDR");

    /*
     * ACPI 6.0: 9.20 NVDIMM Devices:
     *
     * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
     * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMMs support is exposed by platform to
     * OSPM.
     * For each NVDIMM present or intended to be supported by platform,
     * platform firmware also exposes an ACPI Namespace Device under the
     * root device.
     */
    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));

    nvdimm_build_common_dsm(dev, nvdimm_state);

    /* 0 is reserved for root device. */
    nvdimm_build_device_dsm(dev, 0);
    nvdimm_build_fit(dev);

    nvdimm_build_nvdimm_devices(dev, ram_slots);

    aml_append(sb_scope, dev);
    aml_append(ssdt, sb_scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    mem_addr_offset = build_append_named_dword(table_data,
                                               NVDIMM_ACPI_MEM_ADDR);

    bios_linker_loader_alloc(linker,
                             NVDIMM_DSM_MEM_FILE, nvdimm_state->dsm_mem,
                             sizeof(NvdimmDsmIn), false /* high memory */);
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t),
        NVDIMM_DSM_MEM_FILE, 0);
    free_aml_allocator();
    /*
     * must be executed last so that the pointer-patching command above
     * is executed by the guest before it recalculates the checksum, which
     * is scheduled by acpi_table_end().
     */
    acpi_table_end(linker, &table);
}

void nvdimm_build_srat(GArray *table_data)
{
    GSList *device_list, *list = nvdimm_get_device_list();

    for (device_list = list; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;
        Object *obj = OBJECT(dev);
        uint64_t addr, size;
        int node;

        node = object_property_get_int(obj, PC_DIMM_NODE_PROP, &error_abort);
        addr = object_property_get_uint(obj, PC_DIMM_ADDR_PROP, &error_abort);
        size = object_property_get_uint(obj, PC_DIMM_SIZE_PROP, &error_abort);

        build_srat_memory(table_data, addr, size, node,
                          MEM_AFFINITY_ENABLED | MEM_AFFINITY_NON_VOLATILE);
    }
    g_slist_free(list);
}

void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
                       BIOSLinker *linker, NVDIMMState *state,
                       uint32_t ram_slots, const char *oem_id,
                       const char *oem_table_id)
{
    GSList *device_list;

    /* no NVDIMM device can be plugged. */
    if (!ram_slots) {
        return;
    }

    nvdimm_build_ssdt(table_offsets, table_data, linker, state,
                      ram_slots, oem_id);

    device_list = nvdimm_get_device_list();
    /* no NVDIMM device is plugged. */
    if (!device_list) {
        return;
    }

    nvdimm_build_nfit(state, table_offsets, table_data, linker,
                      oem_id, oem_table_id);
    g_slist_free(device_list);
}