/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"
#include "intel.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
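
/*
 * Example (module name assumed to be 'nfit'): loading with
 * "modprobe nfit force_enable_dimms=1" asks the driver to ignore the
 * ACPI DIMM device _STA status when enabling DIMMs, which can help when
 * debugging platform firmware issues.
 */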

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}
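
/*
 * Look up the _DSM revision id to use for a given family/function pair.
 * A handful of NVDIMM_FAMILY_INTEL functions are only defined for
 * revision 2 of the interface; everything else defaults to revision 1.
 */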
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	if (cmd_rc)
		*cmd_rc = -EINVAL;
	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
	}

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
			dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}
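
/*
 * Translate an NFIT device handle to the SMBIOS handle (physical_id) of
 * the backing DIMM and report that DIMM's memdev flags. Returns -ENODEV
 * if no registered NFIT instance knows about the handle.
 */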
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				*flags = memdev->flags;
				return memdev->physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}
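
/*
 * The interleave table is variable length: the structure embeds the
 * first line offset, so the effective size is the base structure plus
 * one u32 per additional line_count entry.
 */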
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;


	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
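 *
 * For example, assuming the bus device is named 'ndbus0', writing '1' to
 * /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub selects the full-scrub
 * behavior and writing '0' restores the default.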
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		mutex_lock(&acpi_desc->init_mutex);
		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				acpi_desc->scrub_busy
				&& !acpi_desc->cancel ? "+\n" : "\n");
		mutex_unlock(&acpi_desc->init_mutex);
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, 0);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
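
/*
 * When a DIMM publishes both a pmem and a blk interface (two control
 * regions), 'format1' reports the interface code of the second format
 * in addition to the primary one reported by 'format'.
 */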
static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	u16 flags = __to_nfit_memdev(nfit_mem)->flags;

	if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
		flags |= ACPI_NFIT_MEM_FLUSH_FAILED;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static ssize_t dirty_shutdown_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	&dev_attr_dirty_shutdown.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;

	if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
			&& a == &dev_attr_dirty_shutdown.attr)
		return 0;

	return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n",
				dev_name(dev), event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
{
	acpi_handle handle;
	acpi_status status;

	status = acpi_get_handle(adev->handle, method, &handle);

	if (ACPI_SUCCESS(status))
		return true;
	return false;
}

__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
	struct nd_intel_smart smart = { 0 };
	union acpi_object in_buf = {
		.type = ACPI_TYPE_BUFFER,
		.buffer.pointer = (char *) &smart,
		.buffer.length = sizeof(smart),
	};
	union acpi_object in_obj = {
		.type = ACPI_TYPE_PACKAGE,
		.package.count = 1,
		.package.elements = &in_buf,
	};
	const u8 func = ND_INTEL_SMART;
	const guid_t *guid = to_nfit_uuid(nfit_mem->family);
	u8 revid = nfit_dsm_revid(nfit_mem->family, func);
	struct acpi_device *adev = nfit_mem->adev;
	acpi_handle handle = adev->handle;
	union acpi_object *out_obj;

	if ((nfit_mem->dsm_mask & (1 << func)) == 0)
		return;

	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	if (!out_obj)
		return;

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
		if (smart.shutdown_state)
			set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
	}

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
		set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
		nfit_mem->dirty_shutdown = smart.shutdown_count;
	}
	ACPI_FREE(out_obj);
}

static void populate_shutdown_status(struct nfit_mem *nfit_mem)
{
	/*
	 * For DIMMs that provide a dynamic facility to retrieve a
	 * dirty-shutdown status and/or a dirty-shutdown count, cache
	 * these values in nfit_mem.
	 */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		nfit_intel_shutdown_status(nfit_mem);
}
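
/*
 * Bind a DIMM described by the NFIT to its ACPI companion device and
 * probe which _DSM family / functions it implements. The ACPI
 * _LSI/_LSR/_LSW label methods are recorded as a fallback when the
 * Intel label DSMs are not available.
 */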
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask, label_mask;
	const guid_t *guid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev) {
		/* unit test case */
		populate_shutdown_status(nfit_mem);
		return 0;
	}

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}
	/*
	 * Record nfit_mem for the notification path to track back to
	 * the nfit sysfs attributes for this dimm device object.
	 */
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets. Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this GUID.
	 */
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = NVDIMM_INTEL_CMDMASK;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid,
					nfit_dsm_revid(nfit_mem->family, i),
					1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	/*
	 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
	 * due to their better semantics handling locked capacity.
	 */
	label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
		| 1 << ND_CMD_SET_CONFIG_DATA;
	if (family == NVDIMM_FAMILY_INTEL
			&& (dsm_mask & label_mask) == label_mask)
		return 0;

	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
		set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
	}

	if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
			&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
		set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
	}

	populate_shutdown_status(nfit_mem);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm) {
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
			dev_set_drvdata(&adev_dimm->dev, NULL);
		}
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
			/*
			 * These commands have a 1:1 correspondence
			 * between DSM payload and libnvdimm ioctl
			 * payload format.
			 */
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}

		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		}
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		if (!nvdimm)
			continue;

		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
 */
enum nfit_aux_cmds {
	NFIT_CMD_TRANSLATE_SPA = 5,
	NFIT_CMD_ARS_INJECT_SET = 7,
	NFIT_CMD_ARS_INJECT_CLEAR = 8,
	NFIT_CMD_ARS_INJECT_GET = 9,
};

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	unsigned long dsm_mask;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);

	dsm_mask =
		(1 << ND_CMD_ARS_CAP) |
		(1 << ND_CMD_ARS_START) |
		(1 << ND_CMD_ARS_STATUS) |
		(1 << ND_CMD_CLEAR_ERROR) |
		(1 << NFIT_CMD_TRANSLATE_SPA) |
		(1 << NFIT_CMD_ARS_INJECT_SET) |
		(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
		(1 << NFIT_CMD_ARS_INJECT_GET);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->bus_dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;
		u32 serial_number;
		u16
vendor_id; 2112 u16 manufacturing_date; 2113 u8 manufacturing_location; 2114 u8 reserved[31]; 2115 } mapping[0]; 2116 }; 2117 2118 static size_t sizeof_nfit_set_info(int num_mappings) 2119 { 2120 return sizeof(struct nfit_set_info) 2121 + num_mappings * sizeof(struct nfit_set_info_map); 2122 } 2123 2124 static size_t sizeof_nfit_set_info2(int num_mappings) 2125 { 2126 return sizeof(struct nfit_set_info2) 2127 + num_mappings * sizeof(struct nfit_set_info_map2); 2128 } 2129 2130 static int cmp_map_compat(const void *m0, const void *m1) 2131 { 2132 const struct nfit_set_info_map *map0 = m0; 2133 const struct nfit_set_info_map *map1 = m1; 2134 2135 return memcmp(&map0->region_offset, &map1->region_offset, 2136 sizeof(u64)); 2137 } 2138 2139 static int cmp_map(const void *m0, const void *m1) 2140 { 2141 const struct nfit_set_info_map *map0 = m0; 2142 const struct nfit_set_info_map *map1 = m1; 2143 2144 if (map0->region_offset < map1->region_offset) 2145 return -1; 2146 else if (map0->region_offset > map1->region_offset) 2147 return 1; 2148 return 0; 2149 } 2150 2151 static int cmp_map2(const void *m0, const void *m1) 2152 { 2153 const struct nfit_set_info_map2 *map0 = m0; 2154 const struct nfit_set_info_map2 *map1 = m1; 2155 2156 if (map0->region_offset < map1->region_offset) 2157 return -1; 2158 else if (map0->region_offset > map1->region_offset) 2159 return 1; 2160 return 0; 2161 } 2162 2163 /* Retrieve the nth entry referencing this spa */ 2164 static struct acpi_nfit_memory_map *memdev_from_spa( 2165 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2166 { 2167 struct nfit_memdev *nfit_memdev; 2168 2169 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2170 if (nfit_memdev->memdev->range_index == range_index) 2171 if (n-- == 0) 2172 return nfit_memdev->memdev; 2173 return NULL; 2174 } 2175 2176 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2177 struct nd_region_desc *ndr_desc, 2178 struct acpi_nfit_system_address *spa) 2179 { 2180 struct device *dev = acpi_desc->dev; 2181 struct nd_interleave_set *nd_set; 2182 u16 nr = ndr_desc->num_mappings; 2183 struct nfit_set_info2 *info2; 2184 struct nfit_set_info *info; 2185 int i; 2186 2187 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2188 if (!nd_set) 2189 return -ENOMEM; 2190 ndr_desc->nd_set = nd_set; 2191 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2192 2193 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2194 if (!info) 2195 return -ENOMEM; 2196 2197 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); 2198 if (!info2) 2199 return -ENOMEM; 2200 2201 for (i = 0; i < nr; i++) { 2202 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2203 struct nfit_set_info_map *map = &info->mapping[i]; 2204 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2205 struct nvdimm *nvdimm = mapping->nvdimm; 2206 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2207 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 2208 spa->range_index, i); 2209 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2210 2211 if (!memdev || !nfit_mem->dcr) { 2212 dev_err(dev, "%s: failed to find DCR\n", __func__); 2213 return -ENODEV; 2214 } 2215 2216 map->region_offset = memdev->region_offset; 2217 map->serial_number = dcr->serial_number; 2218 2219 map2->region_offset = memdev->region_offset; 2220 map2->serial_number = dcr->serial_number; 2221 map2->vendor_id = dcr->vendor_id; 2222 map2->manufacturing_date = dcr->manufacturing_date; 2223 
map2->manufacturing_location = dcr->manufacturing_location; 2224 } 2225 2226 /* v1.1 namespaces */ 2227 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2228 cmp_map, NULL); 2229 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2230 2231 /* v1.2 namespaces */ 2232 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), 2233 cmp_map2, NULL); 2234 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); 2235 2236 /* support v1.1 namespaces created with the wrong sort order */ 2237 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2238 cmp_map_compat, NULL); 2239 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2240 2241 /* record the result of the sort for the mapping position */ 2242 for (i = 0; i < nr; i++) { 2243 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2244 int j; 2245 2246 for (j = 0; j < nr; j++) { 2247 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2248 struct nvdimm *nvdimm = mapping->nvdimm; 2249 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2250 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2251 2252 if (map2->serial_number == dcr->serial_number && 2253 map2->vendor_id == dcr->vendor_id && 2254 map2->manufacturing_date == dcr->manufacturing_date && 2255 map2->manufacturing_location 2256 == dcr->manufacturing_location) { 2257 mapping->position = i; 2258 break; 2259 } 2260 } 2261 } 2262 2263 ndr_desc->nd_set = nd_set; 2264 devm_kfree(dev, info); 2265 devm_kfree(dev, info2); 2266 2267 return 0; 2268 } 2269 2270 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2271 { 2272 struct acpi_nfit_interleave *idt = mmio->idt; 2273 u32 sub_line_offset, line_index, line_offset; 2274 u64 line_no, table_skip_count, table_offset; 2275 2276 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2277 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2278 line_offset = idt->line_offset[line_index] 2279 * mmio->line_size; 2280 table_offset = table_skip_count * mmio->table_size; 2281 2282 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2283 } 2284 2285 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2286 { 2287 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2288 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2289 const u32 STATUS_MASK = 0x80000037; 2290 2291 if (mmio->num_lines) 2292 offset = to_interleave_offset(offset, mmio); 2293 2294 return readl(mmio->addr.base + offset) & STATUS_MASK; 2295 } 2296 2297 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2298 resource_size_t dpa, unsigned int len, unsigned int write) 2299 { 2300 u64 cmd, offset; 2301 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2302 2303 enum { 2304 BCW_OFFSET_MASK = (1ULL << 48)-1, 2305 BCW_LEN_SHIFT = 48, 2306 BCW_LEN_MASK = (1ULL << 8) - 1, 2307 BCW_CMD_SHIFT = 56, 2308 }; 2309 2310 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2311 len = len >> L1_CACHE_SHIFT; 2312 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2313 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2314 2315 offset = nfit_blk->cmd_offset + mmio->size * bw; 2316 if (mmio->num_lines) 2317 offset = to_interleave_offset(offset, mmio); 2318 2319 writeq(cmd, mmio->addr.base + offset); 2320 nvdimm_flush(nfit_blk->nd_region); 2321 2322 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2323 readq(mmio->addr.base + offset); 2324 } 2325 2326 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2327 
resource_size_t dpa, void *iobuf, size_t len, int rw, 2328 unsigned int lane) 2329 { 2330 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2331 unsigned int copied = 0; 2332 u64 base_offset; 2333 int rc; 2334 2335 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2336 + lane * mmio->size; 2337 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2338 while (len) { 2339 unsigned int c; 2340 u64 offset; 2341 2342 if (mmio->num_lines) { 2343 u32 line_offset; 2344 2345 offset = to_interleave_offset(base_offset + copied, 2346 mmio); 2347 div_u64_rem(offset, mmio->line_size, &line_offset); 2348 c = min_t(size_t, len, mmio->line_size - line_offset); 2349 } else { 2350 offset = base_offset + nfit_blk->bdw_offset; 2351 c = len; 2352 } 2353 2354 if (rw) 2355 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2356 else { 2357 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2358 arch_invalidate_pmem((void __force *) 2359 mmio->addr.aperture + offset, c); 2360 2361 memcpy(iobuf + copied, mmio->addr.aperture + offset, c); 2362 } 2363 2364 copied += c; 2365 len -= c; 2366 } 2367 2368 if (rw) 2369 nvdimm_flush(nfit_blk->nd_region); 2370 2371 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2372 return rc; 2373 } 2374 2375 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2376 resource_size_t dpa, void *iobuf, u64 len, int rw) 2377 { 2378 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2379 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2380 struct nd_region *nd_region = nfit_blk->nd_region; 2381 unsigned int lane, copied = 0; 2382 int rc = 0; 2383 2384 lane = nd_region_acquire_lane(nd_region); 2385 while (len) { 2386 u64 c = min(len, mmio->size); 2387 2388 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2389 iobuf + copied, c, rw, lane); 2390 if (rc) 2391 break; 2392 2393 copied += c; 2394 len -= c; 2395 } 2396 nd_region_release_lane(nd_region, lane); 2397 2398 return rc; 2399 } 2400 2401 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2402 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2403 { 2404 if (idt) { 2405 mmio->num_lines = idt->line_count; 2406 mmio->line_size = idt->line_size; 2407 if (interleave_ways == 0) 2408 return -ENXIO; 2409 mmio->table_size = mmio->num_lines * interleave_ways 2410 * mmio->line_size; 2411 } 2412 2413 return 0; 2414 } 2415 2416 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2417 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2418 { 2419 struct nd_cmd_dimm_flags flags; 2420 int rc; 2421 2422 memset(&flags, 0, sizeof(flags)); 2423 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2424 sizeof(flags), NULL); 2425 2426 if (rc >= 0 && flags.status == 0) 2427 nfit_blk->dimm_flags = flags.flags; 2428 else if (rc == -ENOTTY) { 2429 /* fall back to a conservative default */ 2430 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2431 rc = 0; 2432 } else 2433 rc = -ENXIO; 2434 2435 return rc; 2436 } 2437 2438 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2439 struct device *dev) 2440 { 2441 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2442 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2443 struct nfit_blk_mmio *mmio; 2444 struct nfit_blk *nfit_blk; 2445 struct nfit_mem *nfit_mem; 2446 struct nvdimm *nvdimm; 2447 int rc; 2448 2449 nvdimm = nd_blk_region_to_dimm(ndbr); 2450 nfit_mem = nvdimm_provider_data(nvdimm); 2451 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2452 dev_dbg(dev, 
"missing%s%s%s\n", 2453 nfit_mem ? "" : " nfit_mem", 2454 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2455 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 2456 return -ENXIO; 2457 } 2458 2459 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2460 if (!nfit_blk) 2461 return -ENOMEM; 2462 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2463 nfit_blk->nd_region = to_nd_region(dev); 2464 2465 /* map block aperture memory */ 2466 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2467 mmio = &nfit_blk->mmio[BDW]; 2468 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2469 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2470 if (!mmio->addr.base) { 2471 dev_dbg(dev, "%s failed to map bdw\n", 2472 nvdimm_name(nvdimm)); 2473 return -ENOMEM; 2474 } 2475 mmio->size = nfit_mem->bdw->size; 2476 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2477 mmio->idt = nfit_mem->idt_bdw; 2478 mmio->spa = nfit_mem->spa_bdw; 2479 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2480 nfit_mem->memdev_bdw->interleave_ways); 2481 if (rc) { 2482 dev_dbg(dev, "%s failed to init bdw interleave\n", 2483 nvdimm_name(nvdimm)); 2484 return rc; 2485 } 2486 2487 /* map block control memory */ 2488 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2489 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2490 mmio = &nfit_blk->mmio[DCR]; 2491 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2492 nfit_mem->spa_dcr->length); 2493 if (!mmio->addr.base) { 2494 dev_dbg(dev, "%s failed to map dcr\n", 2495 nvdimm_name(nvdimm)); 2496 return -ENOMEM; 2497 } 2498 mmio->size = nfit_mem->dcr->window_size; 2499 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2500 mmio->idt = nfit_mem->idt_dcr; 2501 mmio->spa = nfit_mem->spa_dcr; 2502 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2503 nfit_mem->memdev_dcr->interleave_ways); 2504 if (rc) { 2505 dev_dbg(dev, "%s failed to init dcr interleave\n", 2506 nvdimm_name(nvdimm)); 2507 return rc; 2508 } 2509 2510 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2511 if (rc < 0) { 2512 dev_dbg(dev, "%s failed get DIMM flags\n", 2513 nvdimm_name(nvdimm)); 2514 return rc; 2515 } 2516 2517 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2518 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2519 2520 if (mmio->line_size == 0) 2521 return 0; 2522 2523 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2524 + 8 > mmio->line_size) { 2525 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2526 return -ENXIO; 2527 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2528 + 8 > mmio->line_size) { 2529 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2530 return -ENXIO; 2531 } 2532 2533 return 0; 2534 } 2535 2536 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2537 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2538 { 2539 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2540 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2541 int cmd_rc, rc; 2542 2543 cmd->address = spa->address; 2544 cmd->length = spa->length; 2545 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2546 sizeof(*cmd), &cmd_rc); 2547 if (rc < 0) 2548 return rc; 2549 return cmd_rc; 2550 } 2551 2552 static int ars_start(struct acpi_nfit_desc *acpi_desc, 2553 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) 2554 { 2555 int rc; 2556 int cmd_rc; 2557 struct nd_cmd_ars_start ars_start; 2558 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2559 struct 
nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2560 2561 memset(&ars_start, 0, sizeof(ars_start)); 2562 ars_start.address = spa->address; 2563 ars_start.length = spa->length; 2564 if (req_type == ARS_REQ_SHORT) 2565 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2566 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2567 ars_start.type = ND_ARS_PERSISTENT; 2568 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2569 ars_start.type = ND_ARS_VOLATILE; 2570 else 2571 return -ENOTTY; 2572 2573 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2574 sizeof(ars_start), &cmd_rc); 2575 2576 if (rc < 0) 2577 return rc; 2578 return cmd_rc; 2579 } 2580 2581 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2582 { 2583 int rc, cmd_rc; 2584 struct nd_cmd_ars_start ars_start; 2585 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2586 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2587 2588 memset(&ars_start, 0, sizeof(ars_start)); 2589 ars_start.address = ars_status->restart_address; 2590 ars_start.length = ars_status->restart_length; 2591 ars_start.type = ars_status->type; 2592 ars_start.flags = acpi_desc->ars_start_flags; 2593 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2594 sizeof(ars_start), &cmd_rc); 2595 if (rc < 0) 2596 return rc; 2597 return cmd_rc; 2598 } 2599 2600 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2601 { 2602 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2603 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2604 int rc, cmd_rc; 2605 2606 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2607 acpi_desc->max_ars, &cmd_rc); 2608 if (rc < 0) 2609 return rc; 2610 return cmd_rc; 2611 } 2612 2613 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2614 struct nfit_spa *nfit_spa) 2615 { 2616 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2617 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2618 struct nd_region *nd_region = nfit_spa->nd_region; 2619 struct device *dev; 2620 2621 lockdep_assert_held(&acpi_desc->init_mutex); 2622 /* 2623 * Only advance the ARS state for ARS runs initiated by the 2624 * kernel, ignore ARS results from BIOS initiated runs for scrub 2625 * completion tracking. 2626 */ 2627 if (acpi_desc->scrub_spa != nfit_spa) 2628 return; 2629 2630 if ((ars_status->address >= spa->address && ars_status->address 2631 < spa->address + spa->length) 2632 || (ars_status->address < spa->address)) { 2633 /* 2634 * Assume that if a scrub starts at an offset from the 2635 * start of nfit_spa that we are in the continuation 2636 * case. 2637 * 2638 * Otherwise, if the scrub covers the spa range, mark 2639 * any pending request complete. 2640 */ 2641 if (ars_status->address + ars_status->length 2642 >= spa->address + spa->length) 2643 /* complete */; 2644 else 2645 return; 2646 } else 2647 return; 2648 2649 acpi_desc->scrub_spa = NULL; 2650 if (nd_region) { 2651 dev = nd_region_dev(nd_region); 2652 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2653 } else 2654 dev = acpi_desc->dev; 2655 dev_dbg(dev, "ARS: range %d complete\n", spa->range_index); 2656 } 2657 2658 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2659 { 2660 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2661 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2662 int rc; 2663 u32 i; 2664 2665 /* 2666 * First record starts at 44 byte offset from the start of the 2667 * payload. 
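* Only records that fit entirely within out_length are handed to nvdimm_bus_add_badrange() below; a short buffer is reported as truncated results.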
2668 */ 2669 if (ars_status->out_length < 44) 2670 return 0; 2671 for (i = 0; i < ars_status->num_records; i++) { 2672 /* only process full records */ 2673 if (ars_status->out_length 2674 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2675 break; 2676 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2677 ars_status->records[i].err_address, 2678 ars_status->records[i].length); 2679 if (rc) 2680 return rc; 2681 } 2682 if (i < ars_status->num_records) 2683 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2684 2685 return 0; 2686 } 2687 2688 static void acpi_nfit_remove_resource(void *data) 2689 { 2690 struct resource *res = data; 2691 2692 remove_resource(res); 2693 } 2694 2695 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2696 struct nd_region_desc *ndr_desc) 2697 { 2698 struct resource *res, *nd_res = ndr_desc->res; 2699 int is_pmem, ret; 2700 2701 /* No operation if the region is already registered as PMEM */ 2702 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2703 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2704 if (is_pmem == REGION_INTERSECTS) 2705 return 0; 2706 2707 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2708 if (!res) 2709 return -ENOMEM; 2710 2711 res->name = "Persistent Memory"; 2712 res->start = nd_res->start; 2713 res->end = nd_res->end; 2714 res->flags = IORESOURCE_MEM; 2715 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2716 2717 ret = insert_resource(&iomem_resource, res); 2718 if (ret) 2719 return ret; 2720 2721 ret = devm_add_action_or_reset(acpi_desc->dev, 2722 acpi_nfit_remove_resource, 2723 res); 2724 if (ret) 2725 return ret; 2726 2727 return 0; 2728 } 2729 2730 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2731 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2732 struct acpi_nfit_memory_map *memdev, 2733 struct nfit_spa *nfit_spa) 2734 { 2735 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2736 memdev->device_handle); 2737 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2738 struct nd_blk_region_desc *ndbr_desc; 2739 struct nfit_mem *nfit_mem; 2740 int rc; 2741 2742 if (!nvdimm) { 2743 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2744 spa->range_index, memdev->device_handle); 2745 return -ENODEV; 2746 } 2747 2748 mapping->nvdimm = nvdimm; 2749 switch (nfit_spa_type(spa)) { 2750 case NFIT_SPA_PM: 2751 case NFIT_SPA_VOLATILE: 2752 mapping->start = memdev->address; 2753 mapping->size = memdev->region_size; 2754 break; 2755 case NFIT_SPA_DCR: 2756 nfit_mem = nvdimm_provider_data(nvdimm); 2757 if (!nfit_mem || !nfit_mem->bdw) { 2758 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2759 spa->range_index, nvdimm_name(nvdimm)); 2760 break; 2761 } 2762 2763 mapping->size = nfit_mem->bdw->capacity; 2764 mapping->start = nfit_mem->bdw->start_address; 2765 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2766 ndr_desc->mapping = mapping; 2767 ndr_desc->num_mappings = 1; 2768 ndbr_desc = to_blk_region_desc(ndr_desc); 2769 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2770 ndbr_desc->do_io = acpi_desc->blk_do_io; 2771 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2772 if (rc) 2773 return rc; 2774 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2775 ndr_desc); 2776 if (!nfit_spa->nd_region) 2777 return -ENOMEM; 2778 break; 2779 } 2780 2781 return 0; 2782 } 2783 2784 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2785 { 2786 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2787 
nfit_spa_type(spa) == NFIT_SPA_VCD || 2788 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2789 nfit_spa_type(spa) == NFIT_SPA_PCD); 2790 } 2791 2792 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2793 { 2794 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2795 nfit_spa_type(spa) == NFIT_SPA_VCD || 2796 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2797 } 2798 2799 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2800 struct nfit_spa *nfit_spa) 2801 { 2802 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2803 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2804 struct nd_blk_region_desc ndbr_desc; 2805 struct nd_region_desc *ndr_desc; 2806 struct nfit_memdev *nfit_memdev; 2807 struct nvdimm_bus *nvdimm_bus; 2808 struct resource res; 2809 int count = 0, rc; 2810 2811 if (nfit_spa->nd_region) 2812 return 0; 2813 2814 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2815 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2816 return 0; 2817 } 2818 2819 memset(&res, 0, sizeof(res)); 2820 memset(&mappings, 0, sizeof(mappings)); 2821 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2822 res.start = spa->address; 2823 res.end = res.start + spa->length - 1; 2824 ndr_desc = &ndbr_desc.ndr_desc; 2825 ndr_desc->res = &res; 2826 ndr_desc->provider_data = nfit_spa; 2827 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2828 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2829 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2830 spa->proximity_domain); 2831 else 2832 ndr_desc->numa_node = NUMA_NO_NODE; 2833 2834 /* 2835 * Persistence domain bits are hierarchical, if 2836 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 2837 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 2838 */ 2839 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2840 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2841 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 2842 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2843 2844 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2845 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2846 struct nd_mapping_desc *mapping; 2847 2848 if (memdev->range_index != spa->range_index) 2849 continue; 2850 if (count >= ND_MAX_MAPPINGS) { 2851 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2852 spa->range_index, ND_MAX_MAPPINGS); 2853 return -ENXIO; 2854 } 2855 mapping = &mappings[count++]; 2856 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2857 memdev, nfit_spa); 2858 if (rc) 2859 goto out; 2860 } 2861 2862 ndr_desc->mapping = mappings; 2863 ndr_desc->num_mappings = count; 2864 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2865 if (rc) 2866 goto out; 2867 2868 nvdimm_bus = acpi_desc->nvdimm_bus; 2869 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2870 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2871 if (rc) { 2872 dev_warn(acpi_desc->dev, 2873 "failed to insert pmem resource to iomem: %d\n", 2874 rc); 2875 goto out; 2876 } 2877 2878 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2879 ndr_desc); 2880 if (!nfit_spa->nd_region) 2881 rc = -ENOMEM; 2882 } else if (nfit_spa_is_volatile(spa)) { 2883 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2884 ndr_desc); 2885 if (!nfit_spa->nd_region) 2886 rc = -ENOMEM; 2887 } else if (nfit_spa_is_virtual(spa)) { 2888 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2889 ndr_desc); 2890 if (!nfit_spa->nd_region) 2891 rc = -ENOMEM; 2892 } 2893 2894 out: 2895 if 
(rc) 2896 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2897 nfit_spa->spa->range_index); 2898 return rc; 2899 } 2900 2901 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 2902 { 2903 struct device *dev = acpi_desc->dev; 2904 struct nd_cmd_ars_status *ars_status; 2905 2906 if (acpi_desc->ars_status) { 2907 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2908 return 0; 2909 } 2910 2911 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 2912 if (!ars_status) 2913 return -ENOMEM; 2914 acpi_desc->ars_status = ars_status; 2915 return 0; 2916 } 2917 2918 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 2919 { 2920 int rc; 2921 2922 if (ars_status_alloc(acpi_desc)) 2923 return -ENOMEM; 2924 2925 rc = ars_get_status(acpi_desc); 2926 2927 if (rc < 0 && rc != -ENOSPC) 2928 return rc; 2929 2930 if (ars_status_process_records(acpi_desc)) 2931 dev_err(acpi_desc->dev, "Failed to process ARS records\n"); 2932 2933 return rc; 2934 } 2935 2936 static int ars_register(struct acpi_nfit_desc *acpi_desc, 2937 struct nfit_spa *nfit_spa) 2938 { 2939 int rc; 2940 2941 if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2942 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2943 2944 set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 2945 set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); 2946 2947 switch (acpi_nfit_query_poison(acpi_desc)) { 2948 case 0: 2949 case -EAGAIN: 2950 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); 2951 /* shouldn't happen, try again later */ 2952 if (rc == -EBUSY) 2953 break; 2954 if (rc) { 2955 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2956 break; 2957 } 2958 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 2959 rc = acpi_nfit_query_poison(acpi_desc); 2960 if (rc) 2961 break; 2962 acpi_desc->scrub_spa = nfit_spa; 2963 ars_complete(acpi_desc, nfit_spa); 2964 /* 2965 * If ars_complete() says we didn't complete the 2966 * short scrub, we'll try again with a long 2967 * request. 2968 */ 2969 acpi_desc->scrub_spa = NULL; 2970 break; 2971 case -EBUSY: 2972 case -ENOMEM: 2973 case -ENOSPC: 2974 /* 2975 * BIOS was using ARS, wait for it to complete (or 2976 * resources to become available) and then perform our 2977 * own scrubs. 
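* The ARS_REQ_SHORT / ARS_REQ_LONG bits set above remain pending, so the scrub worker scheduled by acpi_nfit_register_regions() will retry once the BIOS-initiated run finishes.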
2978 */ 2979 break; 2980 default: 2981 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2982 break; 2983 } 2984 2985 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2986 } 2987 2988 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 2989 { 2990 struct nfit_spa *nfit_spa; 2991 2992 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2993 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2994 continue; 2995 ars_complete(acpi_desc, nfit_spa); 2996 } 2997 } 2998 2999 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 3000 int query_rc) 3001 { 3002 unsigned int tmo = acpi_desc->scrub_tmo; 3003 struct device *dev = acpi_desc->dev; 3004 struct nfit_spa *nfit_spa; 3005 3006 lockdep_assert_held(&acpi_desc->init_mutex); 3007 3008 if (acpi_desc->cancel) 3009 return 0; 3010 3011 if (query_rc == -EBUSY) { 3012 dev_dbg(dev, "ARS: ARS busy\n"); 3013 return min(30U * 60U, tmo * 2); 3014 } 3015 if (query_rc == -ENOSPC) { 3016 dev_dbg(dev, "ARS: ARS continue\n"); 3017 ars_continue(acpi_desc); 3018 return 1; 3019 } 3020 if (query_rc && query_rc != -EAGAIN) { 3021 unsigned long long addr, end; 3022 3023 addr = acpi_desc->ars_status->address; 3024 end = addr + acpi_desc->ars_status->length; 3025 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 3026 query_rc); 3027 } 3028 3029 ars_complete_all(acpi_desc); 3030 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3031 enum nfit_ars_state req_type; 3032 int rc; 3033 3034 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3035 continue; 3036 3037 /* prefer short ARS requests first */ 3038 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) 3039 req_type = ARS_REQ_SHORT; 3040 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) 3041 req_type = ARS_REQ_LONG; 3042 else 3043 continue; 3044 rc = ars_start(acpi_desc, nfit_spa, req_type); 3045 3046 dev = nd_region_dev(nfit_spa->nd_region); 3047 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n", 3048 nfit_spa->spa->range_index, 3049 req_type == ARS_REQ_SHORT ? "short" : "long", 3050 rc); 3051 /* 3052 * Hmm, we raced someone else starting ARS? Try again in 3053 * a bit. 
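* Returning 1 makes acpi_nfit_scrub() requeue this work after roughly one second (tmo * HZ).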
3054 */ 3055 if (rc == -EBUSY) 3056 return 1; 3057 if (rc == 0) { 3058 dev_WARN_ONCE(dev, acpi_desc->scrub_spa, 3059 "scrub start while range %d active\n", 3060 acpi_desc->scrub_spa->spa->range_index); 3061 clear_bit(req_type, &nfit_spa->ars_state); 3062 acpi_desc->scrub_spa = nfit_spa; 3063 /* 3064 * Consider this spa last for future scrub 3065 * requests 3066 */ 3067 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 3068 return 1; 3069 } 3070 3071 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 3072 nfit_spa->spa->range_index, rc); 3073 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3074 } 3075 return 0; 3076 } 3077 3078 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) 3079 { 3080 lockdep_assert_held(&acpi_desc->init_mutex); 3081 3082 acpi_desc->scrub_busy = 1; 3083 /* note this should only be set from within the workqueue */ 3084 if (tmo) 3085 acpi_desc->scrub_tmo = tmo; 3086 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 3087 } 3088 3089 static void sched_ars(struct acpi_nfit_desc *acpi_desc) 3090 { 3091 __sched_ars(acpi_desc, 0); 3092 } 3093 3094 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) 3095 { 3096 lockdep_assert_held(&acpi_desc->init_mutex); 3097 3098 acpi_desc->scrub_busy = 0; 3099 acpi_desc->scrub_count++; 3100 if (acpi_desc->scrub_count_state) 3101 sysfs_notify_dirent(acpi_desc->scrub_count_state); 3102 } 3103 3104 static void acpi_nfit_scrub(struct work_struct *work) 3105 { 3106 struct acpi_nfit_desc *acpi_desc; 3107 unsigned int tmo; 3108 int query_rc; 3109 3110 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 3111 mutex_lock(&acpi_desc->init_mutex); 3112 query_rc = acpi_nfit_query_poison(acpi_desc); 3113 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 3114 if (tmo) 3115 __sched_ars(acpi_desc, tmo); 3116 else 3117 notify_ars_done(acpi_desc); 3118 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 3119 mutex_unlock(&acpi_desc->init_mutex); 3120 } 3121 3122 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 3123 struct nfit_spa *nfit_spa) 3124 { 3125 int type = nfit_spa_type(nfit_spa->spa); 3126 struct nd_cmd_ars_cap ars_cap; 3127 int rc; 3128 3129 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3130 memset(&ars_cap, 0, sizeof(ars_cap)); 3131 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 3132 if (rc < 0) 3133 return; 3134 /* check that the supported scrub types match the spa type */ 3135 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 3136 & ND_ARS_VOLATILE) == 0) 3137 return; 3138 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 3139 & ND_ARS_PERSISTENT) == 0) 3140 return; 3141 3142 nfit_spa->max_ars = ars_cap.max_ars_out; 3143 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 3144 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 3145 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 3146 } 3147 3148 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 3149 { 3150 struct nfit_spa *nfit_spa; 3151 int rc; 3152 3153 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3154 switch (nfit_spa_type(nfit_spa->spa)) { 3155 case NFIT_SPA_VOLATILE: 3156 case NFIT_SPA_PM: 3157 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3158 break; 3159 } 3160 } 3161 3162 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3163 switch (nfit_spa_type(nfit_spa->spa)) { 3164 case NFIT_SPA_VOLATILE: 3165 case NFIT_SPA_PM: 3166 /* register regions and kick off initial ARS run */ 3167 rc = ars_register(acpi_desc, nfit_spa); 3168 if (rc) 3169 return rc; 3170 break; 3171 case NFIT_SPA_BDW: 
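/* the block data window is claimed via nfit_mem->spa_bdw when acpi_nfit_blk_region_enable() runs for the owning DCR region */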
3172 /* nothing to register */ 3173 break; 3174 case NFIT_SPA_DCR: 3175 case NFIT_SPA_VDISK: 3176 case NFIT_SPA_VCD: 3177 case NFIT_SPA_PDISK: 3178 case NFIT_SPA_PCD: 3179 /* register known regions that don't support ARS */ 3180 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3181 if (rc) 3182 return rc; 3183 break; 3184 default: 3185 /* don't register unknown regions */ 3186 break; 3187 } 3188 3189 sched_ars(acpi_desc); 3190 return 0; 3191 } 3192 3193 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3194 struct nfit_table_prev *prev) 3195 { 3196 struct device *dev = acpi_desc->dev; 3197 3198 if (!list_empty(&prev->spas) || 3199 !list_empty(&prev->memdevs) || 3200 !list_empty(&prev->dcrs) || 3201 !list_empty(&prev->bdws) || 3202 !list_empty(&prev->idts) || 3203 !list_empty(&prev->flushes)) { 3204 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3205 return -ENXIO; 3206 } 3207 return 0; 3208 } 3209 3210 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3211 { 3212 struct device *dev = acpi_desc->dev; 3213 struct kernfs_node *nfit; 3214 struct device *bus_dev; 3215 3216 if (!ars_supported(acpi_desc->nvdimm_bus)) 3217 return 0; 3218 3219 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3220 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3221 if (!nfit) { 3222 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3223 return -ENODEV; 3224 } 3225 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3226 sysfs_put(nfit); 3227 if (!acpi_desc->scrub_count_state) { 3228 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3229 return -ENODEV; 3230 } 3231 3232 return 0; 3233 } 3234 3235 static void acpi_nfit_unregister(void *data) 3236 { 3237 struct acpi_nfit_desc *acpi_desc = data; 3238 3239 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3240 } 3241 3242 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3243 { 3244 struct device *dev = acpi_desc->dev; 3245 struct nfit_table_prev prev; 3246 const void *end; 3247 int rc; 3248 3249 if (!acpi_desc->nvdimm_bus) { 3250 acpi_nfit_init_dsms(acpi_desc); 3251 3252 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3253 &acpi_desc->nd_desc); 3254 if (!acpi_desc->nvdimm_bus) 3255 return -ENOMEM; 3256 3257 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3258 acpi_desc); 3259 if (rc) 3260 return rc; 3261 3262 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3263 if (rc) 3264 return rc; 3265 3266 /* register this acpi_desc for mce notifications */ 3267 mutex_lock(&acpi_desc_lock); 3268 list_add_tail(&acpi_desc->list, &acpi_descs); 3269 mutex_unlock(&acpi_desc_lock); 3270 } 3271 3272 mutex_lock(&acpi_desc->init_mutex); 3273 3274 INIT_LIST_HEAD(&prev.spas); 3275 INIT_LIST_HEAD(&prev.memdevs); 3276 INIT_LIST_HEAD(&prev.dcrs); 3277 INIT_LIST_HEAD(&prev.bdws); 3278 INIT_LIST_HEAD(&prev.idts); 3279 INIT_LIST_HEAD(&prev.flushes); 3280 3281 list_cut_position(&prev.spas, &acpi_desc->spas, 3282 acpi_desc->spas.prev); 3283 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3284 acpi_desc->memdevs.prev); 3285 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 3286 acpi_desc->dcrs.prev); 3287 list_cut_position(&prev.bdws, &acpi_desc->bdws, 3288 acpi_desc->bdws.prev); 3289 list_cut_position(&prev.idts, &acpi_desc->idts, 3290 acpi_desc->idts.prev); 3291 list_cut_position(&prev.flushes, &acpi_desc->flushes, 3292 acpi_desc->flushes.prev); 3293 3294 end = data + sz; 3295 while (!IS_ERR_OR_NULL(data)) 3296 data = add_table(acpi_desc, &prev, data, end); 3297 3298 if 
(IS_ERR(data)) { 3299 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data)); 3300 rc = PTR_ERR(data); 3301 goto out_unlock; 3302 } 3303 3304 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 3305 if (rc) 3306 goto out_unlock; 3307 3308 rc = nfit_mem_init(acpi_desc); 3309 if (rc) 3310 goto out_unlock; 3311 3312 rc = acpi_nfit_register_dimms(acpi_desc); 3313 if (rc) 3314 goto out_unlock; 3315 3316 rc = acpi_nfit_register_regions(acpi_desc); 3317 3318 out_unlock: 3319 mutex_unlock(&acpi_desc->init_mutex); 3320 return rc; 3321 } 3322 EXPORT_SYMBOL_GPL(acpi_nfit_init); 3323 3324 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3325 { 3326 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3327 struct device *dev = acpi_desc->dev; 3328 3329 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3330 device_lock(dev); 3331 device_unlock(dev); 3332 3333 /* Bounce the init_mutex to complete initial registration */ 3334 mutex_lock(&acpi_desc->init_mutex); 3335 mutex_unlock(&acpi_desc->init_mutex); 3336 3337 return 0; 3338 } 3339 3340 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3341 struct nvdimm *nvdimm, unsigned int cmd) 3342 { 3343 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3344 3345 if (nvdimm) 3346 return 0; 3347 if (cmd != ND_CMD_ARS_START) 3348 return 0; 3349 3350 /* 3351 * The kernel and userspace may race to initiate a scrub, but 3352 * the scrub thread is prepared to lose that initial race. It 3353 * just needs guarantees that any ARS it initiates are not 3354 * interrupted by any intervening start requests from userspace. 3355 */ 3356 if (work_busy(&acpi_desc->dwork.work)) 3357 return -EBUSY; 3358 3359 return 0; 3360 } 3361 3362 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, 3363 enum nfit_ars_state req_type) 3364 { 3365 struct device *dev = acpi_desc->dev; 3366 int scheduled = 0, busy = 0; 3367 struct nfit_spa *nfit_spa; 3368 3369 mutex_lock(&acpi_desc->init_mutex); 3370 if (acpi_desc->cancel) { 3371 mutex_unlock(&acpi_desc->init_mutex); 3372 return 0; 3373 } 3374 3375 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3376 int type = nfit_spa_type(nfit_spa->spa); 3377 3378 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3379 continue; 3380 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3381 continue; 3382 3383 if (test_and_set_bit(req_type, &nfit_spa->ars_state)) 3384 busy++; 3385 else 3386 scheduled++; 3387 } 3388 if (scheduled) { 3389 sched_ars(acpi_desc); 3390 dev_dbg(dev, "ars_scan triggered\n"); 3391 } 3392 mutex_unlock(&acpi_desc->init_mutex); 3393 3394 if (scheduled) 3395 return 0; 3396 if (busy) 3397 return -EBUSY; 3398 return -ENOTTY; 3399 } 3400 3401 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3402 { 3403 struct nvdimm_bus_descriptor *nd_desc; 3404 3405 dev_set_drvdata(dev, acpi_desc); 3406 acpi_desc->dev = dev; 3407 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3408 nd_desc = &acpi_desc->nd_desc; 3409 nd_desc->provider_name = "ACPI.NFIT"; 3410 nd_desc->module = THIS_MODULE; 3411 nd_desc->ndctl = acpi_nfit_ctl; 3412 nd_desc->flush_probe = acpi_nfit_flush_probe; 3413 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3414 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3415 3416 INIT_LIST_HEAD(&acpi_desc->spas); 3417 INIT_LIST_HEAD(&acpi_desc->dcrs); 3418 INIT_LIST_HEAD(&acpi_desc->bdws); 3419 INIT_LIST_HEAD(&acpi_desc->idts); 3420 INIT_LIST_HEAD(&acpi_desc->flushes); 3421 
INIT_LIST_HEAD(&acpi_desc->memdevs); 3422 INIT_LIST_HEAD(&acpi_desc->dimms); 3423 INIT_LIST_HEAD(&acpi_desc->list); 3424 mutex_init(&acpi_desc->init_mutex); 3425 acpi_desc->scrub_tmo = 1; 3426 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3427 } 3428 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3429 3430 static void acpi_nfit_put_table(void *table) 3431 { 3432 acpi_put_table(table); 3433 } 3434 3435 void acpi_nfit_shutdown(void *data) 3436 { 3437 struct acpi_nfit_desc *acpi_desc = data; 3438 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3439 3440 /* 3441 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3442 * race teardown 3443 */ 3444 mutex_lock(&acpi_desc_lock); 3445 list_del(&acpi_desc->list); 3446 mutex_unlock(&acpi_desc_lock); 3447 3448 mutex_lock(&acpi_desc->init_mutex); 3449 acpi_desc->cancel = 1; 3450 cancel_delayed_work_sync(&acpi_desc->dwork); 3451 mutex_unlock(&acpi_desc->init_mutex); 3452 3453 /* 3454 * Bounce the nvdimm bus lock to make sure any in-flight 3455 * acpi_nfit_ars_rescan() submissions have had a chance to 3456 * either submit or see ->cancel set. 3457 */ 3458 device_lock(bus_dev); 3459 device_unlock(bus_dev); 3460 3461 flush_workqueue(nfit_wq); 3462 } 3463 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3464 3465 static int acpi_nfit_add(struct acpi_device *adev) 3466 { 3467 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3468 struct acpi_nfit_desc *acpi_desc; 3469 struct device *dev = &adev->dev; 3470 struct acpi_table_header *tbl; 3471 acpi_status status = AE_OK; 3472 acpi_size sz; 3473 int rc = 0; 3474 3475 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3476 if (ACPI_FAILURE(status)) { 3477 /* This is ok, we could have an nvdimm hotplugged later */ 3478 dev_dbg(dev, "failed to find NFIT at startup\n"); 3479 return 0; 3480 } 3481 3482 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3483 if (rc) 3484 return rc; 3485 sz = tbl->length; 3486 3487 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3488 if (!acpi_desc) 3489 return -ENOMEM; 3490 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3491 3492 /* Save the acpi header for exporting the revision via sysfs */ 3493 acpi_desc->acpi_header = *tbl; 3494 3495 /* Evaluate _FIT and override with that if present */ 3496 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3497 if (ACPI_SUCCESS(status) && buf.length > 0) { 3498 union acpi_object *obj = buf.pointer; 3499 3500 if (obj->type == ACPI_TYPE_BUFFER) 3501 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3502 obj->buffer.length); 3503 else 3504 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3505 (int) obj->type); 3506 kfree(buf.pointer); 3507 } else 3508 /* skip over the lead-in header table */ 3509 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3510 + sizeof(struct acpi_table_nfit), 3511 sz - sizeof(struct acpi_table_nfit)); 3512 3513 if (rc) 3514 return rc; 3515 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3516 } 3517 3518 static int acpi_nfit_remove(struct acpi_device *adev) 3519 { 3520 /* see acpi_nfit_unregister */ 3521 return 0; 3522 } 3523 3524 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3525 { 3526 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3527 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3528 union acpi_object *obj; 3529 acpi_status status; 3530 int ret; 3531 3532 if (!dev->driver) { 3533 /* dev->driver may be null if we're being removed */ 3534 dev_dbg(dev, "no driver found for dev\n"); 3535 
return; 3536 } 3537 3538 if (!acpi_desc) { 3539 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3540 if (!acpi_desc) 3541 return; 3542 acpi_nfit_desc_init(acpi_desc, dev); 3543 } else { 3544 /* 3545 * Finish previous registration before considering new 3546 * regions. 3547 */ 3548 flush_workqueue(nfit_wq); 3549 } 3550 3551 /* Evaluate _FIT */ 3552 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3553 if (ACPI_FAILURE(status)) { 3554 dev_err(dev, "failed to evaluate _FIT\n"); 3555 return; 3556 } 3557 3558 obj = buf.pointer; 3559 if (obj->type == ACPI_TYPE_BUFFER) { 3560 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3561 obj->buffer.length); 3562 if (ret) 3563 dev_err(dev, "failed to merge updated NFIT\n"); 3564 } else 3565 dev_err(dev, "Invalid _FIT\n"); 3566 kfree(buf.pointer); 3567 } 3568 3569 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3570 { 3571 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3572 3573 if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) 3574 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); 3575 else 3576 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT); 3577 } 3578 3579 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3580 { 3581 dev_dbg(dev, "event: 0x%x\n", event); 3582 3583 switch (event) { 3584 case NFIT_NOTIFY_UPDATE: 3585 return acpi_nfit_update_notify(dev, handle); 3586 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3587 return acpi_nfit_uc_error_notify(dev, handle); 3588 default: 3589 return; 3590 } 3591 } 3592 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3593 3594 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3595 { 3596 device_lock(&adev->dev); 3597 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3598 device_unlock(&adev->dev); 3599 } 3600 3601 static const struct acpi_device_id acpi_nfit_ids[] = { 3602 { "ACPI0012", 0 }, 3603 { "", 0 }, 3604 }; 3605 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3606 3607 static struct acpi_driver acpi_nfit_driver = { 3608 .name = KBUILD_MODNAME, 3609 .ids = acpi_nfit_ids, 3610 .ops = { 3611 .add = acpi_nfit_add, 3612 .remove = acpi_nfit_remove, 3613 .notify = acpi_nfit_notify, 3614 }, 3615 }; 3616 3617 static __init int nfit_init(void) 3618 { 3619 int ret; 3620 3621 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3622 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3623 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3624 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3625 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3626 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3627 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3628 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3629 3630 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3631 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3632 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3633 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3634 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3635 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3636 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3637 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3638 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3639 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3640 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3641 guid_parse(UUID_NFIT_DIMM_N_HPE2, 
&nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3642 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3643 3644 nfit_wq = create_singlethread_workqueue("nfit"); 3645 if (!nfit_wq) 3646 return -ENOMEM; 3647 3648 nfit_mce_register(); 3649 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3650 if (ret) { 3651 nfit_mce_unregister(); 3652 destroy_workqueue(nfit_wq); 3653 } 3654 3655 return ret; 3656 3657 } 3658 3659 static __exit void nfit_exit(void) 3660 { 3661 nfit_mce_unregister(); 3662 acpi_bus_unregister_driver(&acpi_nfit_driver); 3663 destroy_workqueue(nfit_wq); 3664 WARN_ON(!list_empty(&acpi_descs)); 3665 } 3666 3667 module_init(nfit_init); 3668 module_exit(nfit_exit); 3669 MODULE_LICENSE("GPL v2"); 3670 MODULE_AUTHOR("Intel Corporation"); 3671