1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 4 */ 5 #include <linux/list_sort.h> 6 #include <linux/libnvdimm.h> 7 #include <linux/module.h> 8 #include <linux/nospec.h> 9 #include <linux/mutex.h> 10 #include <linux/ndctl.h> 11 #include <linux/sysfs.h> 12 #include <linux/delay.h> 13 #include <linux/list.h> 14 #include <linux/acpi.h> 15 #include <linux/sort.h> 16 #include <linux/io.h> 17 #include <linux/nd.h> 18 #include <asm/cacheflush.h> 19 #include <acpi/nfit.h> 20 #include "intel.h" 21 #include "nfit.h" 22 23 /* 24 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 25 * irrelevant. 26 */ 27 #include <linux/io-64-nonatomic-hi-lo.h> 28 29 static bool force_enable_dimms; 30 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 31 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 32 33 static bool disable_vendor_specific; 34 module_param(disable_vendor_specific, bool, S_IRUGO); 35 MODULE_PARM_DESC(disable_vendor_specific, 36 "Limit commands to the publicly specified set"); 37 38 static unsigned long override_dsm_mask; 39 module_param(override_dsm_mask, ulong, S_IRUGO); 40 MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions"); 41 42 static int default_dsm_family = -1; 43 module_param(default_dsm_family, int, S_IRUGO); 44 MODULE_PARM_DESC(default_dsm_family, 45 "Try this DSM type first when identifying NVDIMM family"); 46 47 static bool no_init_ars; 48 module_param(no_init_ars, bool, 0644); 49 MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time"); 50 51 static bool force_labels; 52 module_param(force_labels, bool, 0444); 53 MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods"); 54 55 LIST_HEAD(acpi_descs); 56 DEFINE_MUTEX(acpi_desc_lock); 57 58 static struct workqueue_struct *nfit_wq; 59 60 struct nfit_table_prev { 61 struct list_head spas; 62 struct list_head memdevs; 63 struct list_head dcrs; 64 struct list_head bdws; 65 struct list_head idts; 66 struct list_head flushes; 67 }; 68 69 static guid_t nfit_uuid[NFIT_UUID_MAX]; 70 71 const guid_t *to_nfit_uuid(enum nfit_uuids id) 72 { 73 return &nfit_uuid[id]; 74 } 75 EXPORT_SYMBOL(to_nfit_uuid); 76 77 static const guid_t *to_nfit_bus_uuid(int family) 78 { 79 if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT, 80 "only secondary bus families can be translated\n")) 81 return NULL; 82 /* 83 * The index of bus UUIDs starts immediately following the last 84 * NVDIMM/leaf family. 85 */ 86 return to_nfit_uuid(family + NVDIMM_FAMILY_MAX); 87 } 88 89 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 90 { 91 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 92 93 /* 94 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct 95 * acpi_device. 96 */ 97 if (!nd_desc->provider_name 98 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) 99 return NULL; 100 101 return to_acpi_device(acpi_desc->dev); 102 } 103 104 static int xlat_bus_status(void *buf, unsigned int cmd, u32 status) 105 { 106 struct nd_cmd_clear_error *clear_err; 107 struct nd_cmd_ars_status *ars_status; 108 u16 flags; 109 110 switch (cmd) { 111 case ND_CMD_ARS_CAP: 112 if ((status & 0xffff) == NFIT_ARS_CAP_NONE) 113 return -ENOTTY; 114 115 /* Command failed */ 116 if (status & 0xffff) 117 return -EIO; 118 119 /* No supported scan types for this range */ 120 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; 121 if ((status >> 16 & flags) == 0) 122 return -ENOTTY; 123 return 0; 124 case ND_CMD_ARS_START: 125 /* ARS is in progress */ 126 if ((status & 0xffff) == NFIT_ARS_START_BUSY) 127 return -EBUSY; 128 129 /* Command failed */ 130 if (status & 0xffff) 131 return -EIO; 132 return 0; 133 case ND_CMD_ARS_STATUS: 134 ars_status = buf; 135 /* Command failed */ 136 if (status & 0xffff) 137 return -EIO; 138 /* Check extended status (Upper two bytes) */ 139 if (status == NFIT_ARS_STATUS_DONE) 140 return 0; 141 142 /* ARS is in progress */ 143 if (status == NFIT_ARS_STATUS_BUSY) 144 return -EBUSY; 145 146 /* No ARS performed for the current boot */ 147 if (status == NFIT_ARS_STATUS_NONE) 148 return -EAGAIN; 149 150 /* 151 * ARS interrupted, either we overflowed or some other 152 * agent wants the scan to stop. If we didn't overflow 153 * then just continue with the returned results. 154 */ 155 if (status == NFIT_ARS_STATUS_INTR) { 156 if (ars_status->out_length >= 40 && (ars_status->flags 157 & NFIT_ARS_F_OVERFLOW)) 158 return -ENOSPC; 159 return 0; 160 } 161 162 /* Unknown status */ 163 if (status >> 16) 164 return -EIO; 165 return 0; 166 case ND_CMD_CLEAR_ERROR: 167 clear_err = buf; 168 if (status & 0xffff) 169 return -EIO; 170 if (!clear_err->cleared) 171 return -EIO; 172 if (clear_err->length > clear_err->cleared) 173 return clear_err->cleared; 174 return 0; 175 default: 176 break; 177 } 178 179 /* all other non-zero status results in an error */ 180 if (status) 181 return -EIO; 182 return 0; 183 } 184 185 #define ACPI_LABELS_LOCKED 3 186 187 static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, 188 u32 status) 189 { 190 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 191 192 switch (cmd) { 193 case ND_CMD_GET_CONFIG_SIZE: 194 /* 195 * In the _LSI, _LSR, _LSW case the locked status is 196 * communicated via the read/write commands 197 */ 198 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) 199 break; 200 201 if (status >> 16 & ND_CONFIG_LOCKED) 202 return -EACCES; 203 break; 204 case ND_CMD_GET_CONFIG_DATA: 205 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) 206 && status == ACPI_LABELS_LOCKED) 207 return -EACCES; 208 break; 209 case ND_CMD_SET_CONFIG_DATA: 210 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags) 211 && status == ACPI_LABELS_LOCKED) 212 return -EACCES; 213 break; 214 default: 215 break; 216 } 217 218 /* all other non-zero status results in an error */ 219 if (status) 220 return -EIO; 221 return 0; 222 } 223 224 static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, 225 u32 status) 226 { 227 if (!nvdimm) 228 return xlat_bus_status(buf, cmd, status); 229 return xlat_nvdimm_status(nvdimm, buf, cmd, status); 230 } 231 232 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */ 233 static union acpi_object *pkg_to_buf(union acpi_object *pkg) 234 { 235 int i; 236 void *dst; 237 size_t size = 0; 238 union acpi_object *buf = NULL; 239 240 if (pkg->type != ACPI_TYPE_PACKAGE) { 241 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 242 pkg->type); 243 goto err; 244 } 245 246 for (i = 0; i < pkg->package.count; i++) { 247 union acpi_object *obj = &pkg->package.elements[i]; 248 249 if (obj->type == ACPI_TYPE_INTEGER) 250 size += 4; 251 else if (obj->type == ACPI_TYPE_BUFFER) 252 size += obj->buffer.length; 253 else { 254 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 255 obj->type); 256 goto err; 257 } 258 } 259 260 buf = ACPI_ALLOCATE(sizeof(*buf) + size); 261 if (!buf) 262 goto err; 263 264 dst = buf + 1; 265 buf->type = ACPI_TYPE_BUFFER; 266 buf->buffer.length = size; 267 buf->buffer.pointer = dst; 268 for (i = 0; i < pkg->package.count; i++) { 269 union acpi_object *obj = &pkg->package.elements[i]; 270 271 if (obj->type == ACPI_TYPE_INTEGER) { 272 memcpy(dst, &obj->integer.value, 4); 273 dst += 4; 274 } else if (obj->type == ACPI_TYPE_BUFFER) { 275 memcpy(dst, obj->buffer.pointer, obj->buffer.length); 276 dst += obj->buffer.length; 277 } 278 } 279 err: 280 ACPI_FREE(pkg); 281 return buf; 282 } 283 284 static union acpi_object *int_to_buf(union acpi_object *integer) 285 { 286 union acpi_object *buf = NULL; 287 void *dst = NULL; 288 289 if (integer->type != ACPI_TYPE_INTEGER) { 290 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 291 integer->type); 292 goto err; 293 } 294 295 buf = ACPI_ALLOCATE(sizeof(*buf) + 4); 296 if (!buf) 297 goto err; 298 299 dst = buf + 1; 300 buf->type = ACPI_TYPE_BUFFER; 301 buf->buffer.length = 4; 302 buf->buffer.pointer = dst; 303 memcpy(dst, &integer->integer.value, 4); 304 err: 305 ACPI_FREE(integer); 306 return buf; 307 } 308 309 static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset, 310 u32 len, void *data) 311 { 312 acpi_status rc; 313 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 314 struct acpi_object_list input = { 315 .count = 3, 316 .pointer = (union acpi_object []) { 317 [0] = { 318 .integer.type = ACPI_TYPE_INTEGER, 319 .integer.value = offset, 320 }, 321 [1] = { 322 .integer.type = ACPI_TYPE_INTEGER, 323 .integer.value = len, 324 }, 325 [2] = { 326 .buffer.type = ACPI_TYPE_BUFFER, 327 .buffer.pointer = data, 328 .buffer.length = len, 329 }, 330 }, 331 }; 332 333 rc = acpi_evaluate_object(handle, "_LSW", &input, &buf); 334 if (ACPI_FAILURE(rc)) 335 return NULL; 336 return int_to_buf(buf.pointer); 337 } 338 339 static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset, 340 u32 len) 341 { 342 acpi_status rc; 343 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 344 struct acpi_object_list input = { 345 .count = 2, 346 .pointer = (union acpi_object []) { 347 [0] = { 348 .integer.type = ACPI_TYPE_INTEGER, 349 .integer.value = offset, 350 }, 351 [1] = { 352 .integer.type = ACPI_TYPE_INTEGER, 353 .integer.value = len, 354 }, 355 }, 356 }; 357 358 rc = acpi_evaluate_object(handle, "_LSR", &input, &buf); 359 if (ACPI_FAILURE(rc)) 360 return NULL; 361 return pkg_to_buf(buf.pointer); 362 } 363 364 static union acpi_object *acpi_label_info(acpi_handle handle) 365 { 366 acpi_status rc; 367 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 368 369 rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf); 370 if (ACPI_FAILURE(rc)) 371 return NULL; 372 return pkg_to_buf(buf.pointer); 373 } 374 375 static u8 nfit_dsm_revid(unsigned family, unsigned func) 376 { 377 static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { 378 [NVDIMM_FAMILY_INTEL] = { 379 [NVDIMM_INTEL_GET_MODES ... 380 NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2, 381 }, 382 }; 383 u8 id; 384 385 if (family > NVDIMM_FAMILY_MAX) 386 return 0; 387 if (func > NVDIMM_CMD_MAX) 388 return 0; 389 id = revid_table[family][func]; 390 if (id == 0) 391 return 1; /* default */ 392 return id; 393 } 394 395 static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) 396 { 397 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 398 399 if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL 400 && func >= NVDIMM_INTEL_GET_SECURITY_STATE 401 && func <= NVDIMM_INTEL_MASTER_SECURE_ERASE) 402 return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG); 403 return true; 404 } 405 406 static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, 407 struct nd_cmd_pkg *call_pkg, int *family) 408 { 409 if (call_pkg) { 410 int i; 411 412 if (nfit_mem && nfit_mem->family != call_pkg->nd_family) 413 return -ENOTTY; 414 415 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) 416 if (call_pkg->nd_reserved2[i]) 417 return -EINVAL; 418 *family = call_pkg->nd_family; 419 return call_pkg->nd_command; 420 } 421 422 /* In the !call_pkg case, bus commands == bus functions */ 423 if (!nfit_mem) 424 return cmd; 425 426 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ 427 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 428 return cmd; 429 430 /* 431 * Force function number validation to fail since 0 is never 432 * published as a valid function in dsm_mask. 433 */ 434 return 0; 435 } 436 437 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 438 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 439 { 440 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 441 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 442 union acpi_object in_obj, in_buf, *out_obj; 443 const struct nd_cmd_desc *desc = NULL; 444 struct device *dev = acpi_desc->dev; 445 struct nd_cmd_pkg *call_pkg = NULL; 446 const char *cmd_name, *dimm_name; 447 unsigned long cmd_mask, dsm_mask; 448 u32 offset, fw_status = 0; 449 acpi_handle handle; 450 const guid_t *guid; 451 int func, rc, i; 452 int family = 0; 453 454 if (cmd_rc) 455 *cmd_rc = -EINVAL; 456 457 if (cmd == ND_CMD_CALL) 458 call_pkg = buf; 459 func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); 460 if (func < 0) 461 return func; 462 463 if (nvdimm) { 464 struct acpi_device *adev = nfit_mem->adev; 465 466 if (!adev) 467 return -ENOTTY; 468 469 dimm_name = nvdimm_name(nvdimm); 470 cmd_name = nvdimm_cmd_name(cmd); 471 cmd_mask = nvdimm_cmd_mask(nvdimm); 472 dsm_mask = nfit_mem->dsm_mask; 473 desc = nd_cmd_dimm_desc(cmd); 474 guid = to_nfit_uuid(nfit_mem->family); 475 handle = adev->handle; 476 } else { 477 struct acpi_device *adev = to_acpi_dev(acpi_desc); 478 479 cmd_name = nvdimm_bus_cmd_name(cmd); 480 cmd_mask = nd_desc->cmd_mask; 481 if (cmd == ND_CMD_CALL && call_pkg->nd_family) { 482 family = call_pkg->nd_family; 483 if (family > NVDIMM_BUS_FAMILY_MAX || 484 !test_bit(family, &nd_desc->bus_family_mask)) 485 return -EINVAL; 486 family = array_index_nospec(family, 487 NVDIMM_BUS_FAMILY_MAX + 1); 488 dsm_mask = acpi_desc->family_dsm_mask[family]; 489 guid = to_nfit_bus_uuid(family); 490 } else { 491 dsm_mask = acpi_desc->bus_dsm_mask; 492 guid = to_nfit_uuid(NFIT_DEV_BUS); 493 } 494 desc = nd_cmd_bus_desc(cmd); 495 handle = adev->handle; 496 dimm_name = "bus"; 497 } 498 499 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 500 return -ENOTTY; 501 502 /* 503 * Check for a valid command. For ND_CMD_CALL, we also have to 504 * make sure that the DSM function is supported. 505 */ 506 if (cmd == ND_CMD_CALL && 507 (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask))) 508 return -ENOTTY; 509 else if (!test_bit(cmd, &cmd_mask)) 510 return -ENOTTY; 511 512 in_obj.type = ACPI_TYPE_PACKAGE; 513 in_obj.package.count = 1; 514 in_obj.package.elements = &in_buf; 515 in_buf.type = ACPI_TYPE_BUFFER; 516 in_buf.buffer.pointer = buf; 517 in_buf.buffer.length = 0; 518 519 /* libnvdimm has already validated the input envelope */ 520 for (i = 0; i < desc->in_num; i++) 521 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, 522 i, buf); 523 524 if (call_pkg) { 525 /* skip over package wrapper */ 526 in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; 527 in_buf.buffer.length = call_pkg->nd_size_in; 528 } 529 530 dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n", 531 dimm_name, cmd, family, func, in_buf.buffer.length); 532 if (payload_dumpable(nvdimm, func)) 533 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, 534 in_buf.buffer.pointer, 535 min_t(u32, 256, in_buf.buffer.length), true); 536 537 /* call the BIOS, prefer the named methods over _DSM if available */ 538 if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE 539 && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) 540 out_obj = acpi_label_info(handle); 541 else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA 542 && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { 543 struct nd_cmd_get_config_data_hdr *p = buf; 544 545 out_obj = acpi_label_read(handle, p->in_offset, p->in_length); 546 } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA 547 && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) { 548 struct nd_cmd_set_config_hdr *p = buf; 549 550 out_obj = acpi_label_write(handle, p->in_offset, p->in_length, 551 p->in_buf); 552 } else { 553 u8 revid; 554 555 if (nvdimm) 556 revid = nfit_dsm_revid(nfit_mem->family, func); 557 else 558 revid = 1; 559 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); 560 } 561 562 if (!out_obj) { 563 dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name); 564 return -EINVAL; 565 } 566 567 if (out_obj->type != ACPI_TYPE_BUFFER) { 568 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", 569 dimm_name, cmd_name, out_obj->type); 570 rc = -EINVAL; 571 goto out; 572 } 573 574 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, 575 cmd_name, out_obj->buffer.length); 576 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, 577 out_obj->buffer.pointer, 578 min_t(u32, 128, out_obj->buffer.length), true); 579 580 if (call_pkg) { 581 call_pkg->nd_fw_size = out_obj->buffer.length; 582 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, 583 out_obj->buffer.pointer, 584 min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); 585 586 ACPI_FREE(out_obj); 587 /* 588 * Need to support FW function w/o known size in advance. 589 * Caller can determine required size based upon nd_fw_size. 590 * If we return an error (like elsewhere) then caller wouldn't 591 * be able to rely upon data returned to make calculation. 592 */ 593 if (cmd_rc) 594 *cmd_rc = 0; 595 return 0; 596 } 597 598 for (i = 0, offset = 0; i < desc->out_num; i++) { 599 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, 600 (u32 *) out_obj->buffer.pointer, 601 out_obj->buffer.length - offset); 602 603 if (offset + out_size > out_obj->buffer.length) { 604 dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n", 605 dimm_name, cmd_name, i); 606 break; 607 } 608 609 if (in_buf.buffer.length + offset + out_size > buf_len) { 610 dev_dbg(dev, "%s output overrun cmd: %s field: %d\n", 611 dimm_name, cmd_name, i); 612 rc = -ENXIO; 613 goto out; 614 } 615 memcpy(buf + in_buf.buffer.length + offset, 616 out_obj->buffer.pointer + offset, out_size); 617 offset += out_size; 618 } 619 620 /* 621 * Set fw_status for all the commands with a known format to be 622 * later interpreted by xlat_status(). 623 */ 624 if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP 625 && cmd <= ND_CMD_CLEAR_ERROR) 626 || (nvdimm && cmd >= ND_CMD_SMART 627 && cmd <= ND_CMD_VENDOR))) 628 fw_status = *(u32 *) out_obj->buffer.pointer; 629 630 if (offset + in_buf.buffer.length < buf_len) { 631 if (i >= 1) { 632 /* 633 * status valid, return the number of bytes left 634 * unfilled in the output buffer 635 */ 636 rc = buf_len - offset - in_buf.buffer.length; 637 if (cmd_rc) 638 *cmd_rc = xlat_status(nvdimm, buf, cmd, 639 fw_status); 640 } else { 641 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 642 __func__, dimm_name, cmd_name, buf_len, 643 offset); 644 rc = -ENXIO; 645 } 646 } else { 647 rc = 0; 648 if (cmd_rc) 649 *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status); 650 } 651 652 out: 653 ACPI_FREE(out_obj); 654 655 return rc; 656 } 657 EXPORT_SYMBOL_GPL(acpi_nfit_ctl); 658 659 static const char *spa_type_name(u16 type) 660 { 661 static const char *to_name[] = { 662 [NFIT_SPA_VOLATILE] = "volatile", 663 [NFIT_SPA_PM] = "pmem", 664 [NFIT_SPA_DCR] = "dimm-control-region", 665 [NFIT_SPA_BDW] = "block-data-window", 666 [NFIT_SPA_VDISK] = "volatile-disk", 667 [NFIT_SPA_VCD] = "volatile-cd", 668 [NFIT_SPA_PDISK] = "persistent-disk", 669 [NFIT_SPA_PCD] = "persistent-cd", 670 671 }; 672 673 if (type > NFIT_SPA_PCD) 674 return "unknown"; 675 676 return to_name[type]; 677 } 678 679 int nfit_spa_type(struct acpi_nfit_system_address *spa) 680 { 681 guid_t guid; 682 int i; 683 684 import_guid(&guid, spa->range_guid); 685 for (i = 0; i < NFIT_UUID_MAX; i++) 686 if (guid_equal(to_nfit_uuid(i), &guid)) 687 return i; 688 return -1; 689 } 690 691 static size_t sizeof_spa(struct acpi_nfit_system_address *spa) 692 { 693 if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID) 694 return sizeof(*spa); 695 return sizeof(*spa) - 8; 696 } 697 698 static bool add_spa(struct acpi_nfit_desc *acpi_desc, 699 struct nfit_table_prev *prev, 700 struct acpi_nfit_system_address *spa) 701 { 702 struct device *dev = acpi_desc->dev; 703 struct nfit_spa *nfit_spa; 704 705 if (spa->header.length != sizeof_spa(spa)) 706 return false; 707 708 list_for_each_entry(nfit_spa, &prev->spas, list) { 709 if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) { 710 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 711 return true; 712 } 713 } 714 715 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa), 716 GFP_KERNEL); 717 if (!nfit_spa) 718 return false; 719 INIT_LIST_HEAD(&nfit_spa->list); 720 memcpy(nfit_spa->spa, spa, sizeof_spa(spa)); 721 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 722 dev_dbg(dev, "spa index: %d type: %s\n", 723 spa->range_index, 724 spa_type_name(nfit_spa_type(spa))); 725 return true; 726 } 727 728 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, 729 struct nfit_table_prev *prev, 730 struct acpi_nfit_memory_map *memdev) 731 { 732 struct device *dev = acpi_desc->dev; 733 struct nfit_memdev *nfit_memdev; 734 735 if (memdev->header.length != sizeof(*memdev)) 736 return false; 737 738 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 739 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 740 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 741 return true; 742 } 743 744 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), 745 GFP_KERNEL); 746 if (!nfit_memdev) 747 return false; 748 INIT_LIST_HEAD(&nfit_memdev->list); 749 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); 750 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 751 dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n", 752 memdev->device_handle, memdev->range_index, 753 memdev->region_index, memdev->flags); 754 return true; 755 } 756 757 int nfit_get_smbios_id(u32 device_handle, u16 *flags) 758 { 759 struct acpi_nfit_memory_map *memdev; 760 struct acpi_nfit_desc *acpi_desc; 761 struct nfit_mem *nfit_mem; 762 u16 physical_id; 763 764 mutex_lock(&acpi_desc_lock); 765 list_for_each_entry(acpi_desc, &acpi_descs, list) { 766 mutex_lock(&acpi_desc->init_mutex); 767 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 768 memdev = __to_nfit_memdev(nfit_mem); 769 if (memdev->device_handle == device_handle) { 770 *flags = memdev->flags; 771 physical_id = memdev->physical_id; 772 mutex_unlock(&acpi_desc->init_mutex); 773 mutex_unlock(&acpi_desc_lock); 774 return physical_id; 775 } 776 } 777 mutex_unlock(&acpi_desc->init_mutex); 778 } 779 mutex_unlock(&acpi_desc_lock); 780 781 return -ENODEV; 782 } 783 EXPORT_SYMBOL_GPL(nfit_get_smbios_id); 784 785 /* 786 * An implementation may provide a truncated control region if no block windows 787 * are defined. 788 */ 789 static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) 790 { 791 if (dcr->header.length < offsetof(struct acpi_nfit_control_region, 792 window_size)) 793 return 0; 794 if (dcr->windows) 795 return sizeof(*dcr); 796 return offsetof(struct acpi_nfit_control_region, window_size); 797 } 798 799 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 800 struct nfit_table_prev *prev, 801 struct acpi_nfit_control_region *dcr) 802 { 803 struct device *dev = acpi_desc->dev; 804 struct nfit_dcr *nfit_dcr; 805 806 if (!sizeof_dcr(dcr)) 807 return false; 808 809 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 810 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { 811 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 812 return true; 813 } 814 815 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), 816 GFP_KERNEL); 817 if (!nfit_dcr) 818 return false; 819 INIT_LIST_HEAD(&nfit_dcr->list); 820 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); 821 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 822 dev_dbg(dev, "dcr index: %d windows: %d\n", 823 dcr->region_index, dcr->windows); 824 return true; 825 } 826 827 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, 828 struct nfit_table_prev *prev, 829 struct acpi_nfit_data_region *bdw) 830 { 831 struct device *dev = acpi_desc->dev; 832 struct nfit_bdw *nfit_bdw; 833 834 if (bdw->header.length != sizeof(*bdw)) 835 return false; 836 list_for_each_entry(nfit_bdw, &prev->bdws, list) 837 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 838 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 839 return true; 840 } 841 842 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), 843 GFP_KERNEL); 844 if (!nfit_bdw) 845 return false; 846 INIT_LIST_HEAD(&nfit_bdw->list); 847 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); 848 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 849 dev_dbg(dev, "bdw dcr: %d windows: %d\n", 850 bdw->region_index, bdw->windows); 851 return true; 852 } 853 854 static size_t sizeof_idt(struct acpi_nfit_interleave *idt) 855 { 856 if (idt->header.length < sizeof(*idt)) 857 return 0; 858 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); 859 } 860 861 static bool add_idt(struct acpi_nfit_desc *acpi_desc, 862 struct nfit_table_prev *prev, 863 struct acpi_nfit_interleave *idt) 864 { 865 struct device *dev = acpi_desc->dev; 866 struct nfit_idt *nfit_idt; 867 868 if (!sizeof_idt(idt)) 869 return false; 870 871 list_for_each_entry(nfit_idt, &prev->idts, list) { 872 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) 873 continue; 874 875 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { 876 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 877 return true; 878 } 879 } 880 881 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), 882 GFP_KERNEL); 883 if (!nfit_idt) 884 return false; 885 INIT_LIST_HEAD(&nfit_idt->list); 886 memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); 887 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 888 dev_dbg(dev, "idt index: %d num_lines: %d\n", 889 idt->interleave_index, idt->line_count); 890 return true; 891 } 892 893 static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) 894 { 895 if (flush->header.length < sizeof(*flush)) 896 return 0; 897 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); 898 } 899 900 static bool add_flush(struct acpi_nfit_desc *acpi_desc, 901 struct nfit_table_prev *prev, 902 struct acpi_nfit_flush_address *flush) 903 { 904 struct device *dev = acpi_desc->dev; 905 struct nfit_flush *nfit_flush; 906 907 if (!sizeof_flush(flush)) 908 return false; 909 910 list_for_each_entry(nfit_flush, &prev->flushes, list) { 911 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) 912 continue; 913 914 if (memcmp(nfit_flush->flush, flush, 915 sizeof_flush(flush)) == 0) { 916 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 917 return true; 918 } 919 } 920 921 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) 922 + sizeof_flush(flush), GFP_KERNEL); 923 if (!nfit_flush) 924 return false; 925 INIT_LIST_HEAD(&nfit_flush->list); 926 memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); 927 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 928 dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n", 929 flush->device_handle, flush->hint_count); 930 return true; 931 } 932 933 static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, 934 struct acpi_nfit_capabilities *pcap) 935 { 936 struct device *dev = acpi_desc->dev; 937 u32 mask; 938 939 mask = (1 << (pcap->highest_capability + 1)) - 1; 940 acpi_desc->platform_cap = pcap->capabilities & mask; 941 dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap); 942 return true; 943 } 944 945 static void *add_table(struct acpi_nfit_desc *acpi_desc, 946 struct nfit_table_prev *prev, void *table, const void *end) 947 { 948 struct device *dev = acpi_desc->dev; 949 struct acpi_nfit_header *hdr; 950 void *err = ERR_PTR(-ENOMEM); 951 952 if (table >= end) 953 return NULL; 954 955 hdr = table; 956 if (!hdr->length) { 957 dev_warn(dev, "found a zero length table '%d' parsing nfit\n", 958 hdr->type); 959 return NULL; 960 } 961 962 switch (hdr->type) { 963 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: 964 if (!add_spa(acpi_desc, prev, table)) 965 return err; 966 break; 967 case ACPI_NFIT_TYPE_MEMORY_MAP: 968 if (!add_memdev(acpi_desc, prev, table)) 969 return err; 970 break; 971 case ACPI_NFIT_TYPE_CONTROL_REGION: 972 if (!add_dcr(acpi_desc, prev, table)) 973 return err; 974 break; 975 case ACPI_NFIT_TYPE_DATA_REGION: 976 if (!add_bdw(acpi_desc, prev, table)) 977 return err; 978 break; 979 case ACPI_NFIT_TYPE_INTERLEAVE: 980 if (!add_idt(acpi_desc, prev, table)) 981 return err; 982 break; 983 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 984 if (!add_flush(acpi_desc, prev, table)) 985 return err; 986 break; 987 case ACPI_NFIT_TYPE_SMBIOS: 988 dev_dbg(dev, "smbios\n"); 989 break; 990 case ACPI_NFIT_TYPE_CAPABILITIES: 991 if (!add_platform_cap(acpi_desc, table)) 992 return err; 993 break; 994 default: 995 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); 996 break; 997 } 998 999 return table + hdr->length; 1000 } 1001 1002 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, 1003 struct nfit_mem *nfit_mem) 1004 { 1005 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 1006 u16 dcr = nfit_mem->dcr->region_index; 1007 struct nfit_spa *nfit_spa; 1008 1009 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1010 u16 range_index = nfit_spa->spa->range_index; 1011 int type = nfit_spa_type(nfit_spa->spa); 1012 struct nfit_memdev *nfit_memdev; 1013 1014 if (type != NFIT_SPA_BDW) 1015 continue; 1016 1017 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1018 if (nfit_memdev->memdev->range_index != range_index) 1019 continue; 1020 if (nfit_memdev->memdev->device_handle != device_handle) 1021 continue; 1022 if (nfit_memdev->memdev->region_index != dcr) 1023 continue; 1024 1025 nfit_mem->spa_bdw = nfit_spa->spa; 1026 return; 1027 } 1028 } 1029 1030 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 1031 nfit_mem->spa_dcr->range_index); 1032 nfit_mem->bdw = NULL; 1033 } 1034 1035 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, 1036 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 1037 { 1038 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 1039 struct nfit_memdev *nfit_memdev; 1040 struct nfit_bdw *nfit_bdw; 1041 struct nfit_idt *nfit_idt; 1042 u16 idt_idx, range_index; 1043 1044 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 1045 if (nfit_bdw->bdw->region_index != dcr) 1046 continue; 1047 nfit_mem->bdw = nfit_bdw->bdw; 1048 break; 1049 } 1050 1051 if (!nfit_mem->bdw) 1052 return; 1053 1054 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 1055 1056 if (!nfit_mem->spa_bdw) 1057 return; 1058 1059 range_index = nfit_mem->spa_bdw->range_index; 1060 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1061 if (nfit_memdev->memdev->range_index != range_index || 1062 nfit_memdev->memdev->region_index != dcr) 1063 continue; 1064 nfit_mem->memdev_bdw = nfit_memdev->memdev; 1065 idt_idx = nfit_memdev->memdev->interleave_index; 1066 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 1067 if (nfit_idt->idt->interleave_index != idt_idx) 1068 continue; 1069 nfit_mem->idt_bdw = nfit_idt->idt; 1070 break; 1071 } 1072 break; 1073 } 1074 } 1075 1076 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, 1077 struct acpi_nfit_system_address *spa) 1078 { 1079 struct nfit_mem *nfit_mem, *found; 1080 struct nfit_memdev *nfit_memdev; 1081 int type = spa ? nfit_spa_type(spa) : 0; 1082 1083 switch (type) { 1084 case NFIT_SPA_DCR: 1085 case NFIT_SPA_PM: 1086 break; 1087 default: 1088 if (spa) 1089 return 0; 1090 } 1091 1092 /* 1093 * This loop runs in two modes, when a dimm is mapped the loop 1094 * adds memdev associations to an existing dimm, or creates a 1095 * dimm. In the unmapped dimm case this loop sweeps for memdev 1096 * instances with an invalid / zero range_index and adds those 1097 * dimms without spa associations. 1098 */ 1099 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1100 struct nfit_flush *nfit_flush; 1101 struct nfit_dcr *nfit_dcr; 1102 u32 device_handle; 1103 u16 dcr; 1104 1105 if (spa && nfit_memdev->memdev->range_index != spa->range_index) 1106 continue; 1107 if (!spa && nfit_memdev->memdev->range_index) 1108 continue; 1109 found = NULL; 1110 dcr = nfit_memdev->memdev->region_index; 1111 device_handle = nfit_memdev->memdev->device_handle; 1112 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1113 if (__to_nfit_memdev(nfit_mem)->device_handle 1114 == device_handle) { 1115 found = nfit_mem; 1116 break; 1117 } 1118 1119 if (found) 1120 nfit_mem = found; 1121 else { 1122 nfit_mem = devm_kzalloc(acpi_desc->dev, 1123 sizeof(*nfit_mem), GFP_KERNEL); 1124 if (!nfit_mem) 1125 return -ENOMEM; 1126 INIT_LIST_HEAD(&nfit_mem->list); 1127 nfit_mem->acpi_desc = acpi_desc; 1128 list_add(&nfit_mem->list, &acpi_desc->dimms); 1129 } 1130 1131 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1132 if (nfit_dcr->dcr->region_index != dcr) 1133 continue; 1134 /* 1135 * Record the control region for the dimm. For 1136 * the ACPI 6.1 case, where there are separate 1137 * control regions for the pmem vs blk 1138 * interfaces, be sure to record the extended 1139 * blk details. 1140 */ 1141 if (!nfit_mem->dcr) 1142 nfit_mem->dcr = nfit_dcr->dcr; 1143 else if (nfit_mem->dcr->windows == 0 1144 && nfit_dcr->dcr->windows) 1145 nfit_mem->dcr = nfit_dcr->dcr; 1146 break; 1147 } 1148 1149 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 1150 struct acpi_nfit_flush_address *flush; 1151 u16 i; 1152 1153 if (nfit_flush->flush->device_handle != device_handle) 1154 continue; 1155 nfit_mem->nfit_flush = nfit_flush; 1156 flush = nfit_flush->flush; 1157 nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, 1158 flush->hint_count, 1159 sizeof(struct resource), 1160 GFP_KERNEL); 1161 if (!nfit_mem->flush_wpq) 1162 return -ENOMEM; 1163 for (i = 0; i < flush->hint_count; i++) { 1164 struct resource *res = &nfit_mem->flush_wpq[i]; 1165 1166 res->start = flush->hint_address[i]; 1167 res->end = res->start + 8 - 1; 1168 } 1169 break; 1170 } 1171 1172 if (dcr && !nfit_mem->dcr) { 1173 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", 1174 spa->range_index, dcr); 1175 return -ENODEV; 1176 } 1177 1178 if (type == NFIT_SPA_DCR) { 1179 struct nfit_idt *nfit_idt; 1180 u16 idt_idx; 1181 1182 /* multiple dimms may share a SPA when interleaved */ 1183 nfit_mem->spa_dcr = spa; 1184 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1185 idt_idx = nfit_memdev->memdev->interleave_index; 1186 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 1187 if (nfit_idt->idt->interleave_index != idt_idx) 1188 continue; 1189 nfit_mem->idt_dcr = nfit_idt->idt; 1190 break; 1191 } 1192 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); 1193 } else if (type == NFIT_SPA_PM) { 1194 /* 1195 * A single dimm may belong to multiple SPA-PM 1196 * ranges, record at least one in addition to 1197 * any SPA-DCR range. 1198 */ 1199 nfit_mem->memdev_pmem = nfit_memdev->memdev; 1200 } else 1201 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1202 } 1203 1204 return 0; 1205 } 1206 1207 static int nfit_mem_cmp(void *priv, const struct list_head *_a, 1208 const struct list_head *_b) 1209 { 1210 struct nfit_mem *a = container_of(_a, typeof(*a), list); 1211 struct nfit_mem *b = container_of(_b, typeof(*b), list); 1212 u32 handleA, handleB; 1213 1214 handleA = __to_nfit_memdev(a)->device_handle; 1215 handleB = __to_nfit_memdev(b)->device_handle; 1216 if (handleA < handleB) 1217 return -1; 1218 else if (handleA > handleB) 1219 return 1; 1220 return 0; 1221 } 1222 1223 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 1224 { 1225 struct nfit_spa *nfit_spa; 1226 int rc; 1227 1228 1229 /* 1230 * For each SPA-DCR or SPA-PMEM address range find its 1231 * corresponding MEMDEV(s). From each MEMDEV find the 1232 * corresponding DCR. Then, if we're operating on a SPA-DCR, 1233 * try to find a SPA-BDW and a corresponding BDW that references 1234 * the DCR. Throw it all into an nfit_mem object. Note, that 1235 * BDWs are optional. 1236 */ 1237 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1238 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); 1239 if (rc) 1240 return rc; 1241 } 1242 1243 /* 1244 * If a DIMM has failed to be mapped into SPA there will be no 1245 * SPA entries above. Find and register all the unmapped DIMMs 1246 * for reporting and recovery purposes. 1247 */ 1248 rc = __nfit_mem_init(acpi_desc, NULL); 1249 if (rc) 1250 return rc; 1251 1252 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 1253 1254 return 0; 1255 } 1256 1257 static ssize_t bus_dsm_mask_show(struct device *dev, 1258 struct device_attribute *attr, char *buf) 1259 { 1260 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1261 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1262 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1263 1264 return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); 1265 } 1266 static struct device_attribute dev_attr_bus_dsm_mask = 1267 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); 1268 1269 static ssize_t revision_show(struct device *dev, 1270 struct device_attribute *attr, char *buf) 1271 { 1272 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1273 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1274 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1275 1276 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); 1277 } 1278 static DEVICE_ATTR_RO(revision); 1279 1280 static ssize_t hw_error_scrub_show(struct device *dev, 1281 struct device_attribute *attr, char *buf) 1282 { 1283 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1284 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1285 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1286 1287 return sprintf(buf, "%d\n", acpi_desc->scrub_mode); 1288 } 1289 1290 /* 1291 * The 'hw_error_scrub' attribute can have the following values written to it: 1292 * '0': Switch to the default mode where an exception will only insert 1293 * the address of the memory error into the poison and badblocks lists. 1294 * '1': Enable a full scrub to happen if an exception for a memory error is 1295 * received. 1296 */ 1297 static ssize_t hw_error_scrub_store(struct device *dev, 1298 struct device_attribute *attr, const char *buf, size_t size) 1299 { 1300 struct nvdimm_bus_descriptor *nd_desc; 1301 ssize_t rc; 1302 long val; 1303 1304 rc = kstrtol(buf, 0, &val); 1305 if (rc) 1306 return rc; 1307 1308 nfit_device_lock(dev); 1309 nd_desc = dev_get_drvdata(dev); 1310 if (nd_desc) { 1311 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1312 1313 switch (val) { 1314 case HW_ERROR_SCRUB_ON: 1315 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; 1316 break; 1317 case HW_ERROR_SCRUB_OFF: 1318 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; 1319 break; 1320 default: 1321 rc = -EINVAL; 1322 break; 1323 } 1324 } 1325 nfit_device_unlock(dev); 1326 if (rc) 1327 return rc; 1328 return size; 1329 } 1330 static DEVICE_ATTR_RW(hw_error_scrub); 1331 1332 /* 1333 * This shows the number of full Address Range Scrubs that have been 1334 * completed since driver load time. Userspace can wait on this using 1335 * select/poll etc. A '+' at the end indicates an ARS is in progress 1336 */ 1337 static ssize_t scrub_show(struct device *dev, 1338 struct device_attribute *attr, char *buf) 1339 { 1340 struct nvdimm_bus_descriptor *nd_desc; 1341 struct acpi_nfit_desc *acpi_desc; 1342 ssize_t rc = -ENXIO; 1343 bool busy; 1344 1345 nfit_device_lock(dev); 1346 nd_desc = dev_get_drvdata(dev); 1347 if (!nd_desc) { 1348 nfit_device_unlock(dev); 1349 return rc; 1350 } 1351 acpi_desc = to_acpi_desc(nd_desc); 1352 1353 mutex_lock(&acpi_desc->init_mutex); 1354 busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) 1355 && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); 1356 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n"); 1357 /* Allow an admin to poll the busy state at a higher rate */ 1358 if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, 1359 &acpi_desc->scrub_flags)) { 1360 acpi_desc->scrub_tmo = 1; 1361 mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); 1362 } 1363 1364 mutex_unlock(&acpi_desc->init_mutex); 1365 nfit_device_unlock(dev); 1366 return rc; 1367 } 1368 1369 static ssize_t scrub_store(struct device *dev, 1370 struct device_attribute *attr, const char *buf, size_t size) 1371 { 1372 struct nvdimm_bus_descriptor *nd_desc; 1373 ssize_t rc; 1374 long val; 1375 1376 rc = kstrtol(buf, 0, &val); 1377 if (rc) 1378 return rc; 1379 if (val != 1) 1380 return -EINVAL; 1381 1382 nfit_device_lock(dev); 1383 nd_desc = dev_get_drvdata(dev); 1384 if (nd_desc) { 1385 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1386 1387 rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); 1388 } 1389 nfit_device_unlock(dev); 1390 if (rc) 1391 return rc; 1392 return size; 1393 } 1394 static DEVICE_ATTR_RW(scrub); 1395 1396 static bool ars_supported(struct nvdimm_bus *nvdimm_bus) 1397 { 1398 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1399 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START 1400 | 1 << ND_CMD_ARS_STATUS; 1401 1402 return (nd_desc->cmd_mask & mask) == mask; 1403 } 1404 1405 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) 1406 { 1407 struct device *dev = kobj_to_dev(kobj); 1408 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1409 1410 if (a == &dev_attr_scrub.attr) 1411 return ars_supported(nvdimm_bus) ? a->mode : 0; 1412 1413 if (a == &dev_attr_firmware_activate_noidle.attr) 1414 return intel_fwa_supported(nvdimm_bus) ? a->mode : 0; 1415 1416 return a->mode; 1417 } 1418 1419 static struct attribute *acpi_nfit_attributes[] = { 1420 &dev_attr_revision.attr, 1421 &dev_attr_scrub.attr, 1422 &dev_attr_hw_error_scrub.attr, 1423 &dev_attr_bus_dsm_mask.attr, 1424 &dev_attr_firmware_activate_noidle.attr, 1425 NULL, 1426 }; 1427 1428 static const struct attribute_group acpi_nfit_attribute_group = { 1429 .name = "nfit", 1430 .attrs = acpi_nfit_attributes, 1431 .is_visible = nfit_visible, 1432 }; 1433 1434 static const struct attribute_group *acpi_nfit_attribute_groups[] = { 1435 &acpi_nfit_attribute_group, 1436 NULL, 1437 }; 1438 1439 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 1440 { 1441 struct nvdimm *nvdimm = to_nvdimm(dev); 1442 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1443 1444 return __to_nfit_memdev(nfit_mem); 1445 } 1446 1447 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 1448 { 1449 struct nvdimm *nvdimm = to_nvdimm(dev); 1450 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1451 1452 return nfit_mem->dcr; 1453 } 1454 1455 static ssize_t handle_show(struct device *dev, 1456 struct device_attribute *attr, char *buf) 1457 { 1458 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1459 1460 return sprintf(buf, "%#x\n", memdev->device_handle); 1461 } 1462 static DEVICE_ATTR_RO(handle); 1463 1464 static ssize_t phys_id_show(struct device *dev, 1465 struct device_attribute *attr, char *buf) 1466 { 1467 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1468 1469 return sprintf(buf, "%#x\n", memdev->physical_id); 1470 } 1471 static DEVICE_ATTR_RO(phys_id); 1472 1473 static ssize_t vendor_show(struct device *dev, 1474 struct device_attribute *attr, char *buf) 1475 { 1476 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1477 1478 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1479 } 1480 static DEVICE_ATTR_RO(vendor); 1481 1482 static ssize_t rev_id_show(struct device *dev, 1483 struct device_attribute *attr, char *buf) 1484 { 1485 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1486 1487 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1488 } 1489 static DEVICE_ATTR_RO(rev_id); 1490 1491 static ssize_t device_show(struct device *dev, 1492 struct device_attribute *attr, char *buf) 1493 { 1494 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1495 1496 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1497 } 1498 static DEVICE_ATTR_RO(device); 1499 1500 static ssize_t subsystem_vendor_show(struct device *dev, 1501 struct device_attribute *attr, char *buf) 1502 { 1503 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1504 1505 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1506 } 1507 static DEVICE_ATTR_RO(subsystem_vendor); 1508 1509 static ssize_t subsystem_rev_id_show(struct device *dev, 1510 struct device_attribute *attr, char *buf) 1511 { 1512 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1513 1514 return sprintf(buf, "0x%04x\n", 1515 be16_to_cpu(dcr->subsystem_revision_id)); 1516 } 1517 static DEVICE_ATTR_RO(subsystem_rev_id); 1518 1519 static ssize_t subsystem_device_show(struct device *dev, 1520 struct device_attribute *attr, char *buf) 1521 { 1522 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1523 1524 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1525 } 1526 static DEVICE_ATTR_RO(subsystem_device); 1527 1528 static int num_nvdimm_formats(struct nvdimm *nvdimm) 1529 { 1530 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1531 int formats = 0; 1532 1533 if (nfit_mem->memdev_pmem) 1534 formats++; 1535 if (nfit_mem->memdev_bdw) 1536 formats++; 1537 return formats; 1538 } 1539 1540 static ssize_t format_show(struct device *dev, 1541 struct device_attribute *attr, char *buf) 1542 { 1543 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1544 1545 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1546 } 1547 static DEVICE_ATTR_RO(format); 1548 1549 static ssize_t format1_show(struct device *dev, 1550 struct device_attribute *attr, char *buf) 1551 { 1552 u32 handle; 1553 ssize_t rc = -ENXIO; 1554 struct nfit_mem *nfit_mem; 1555 struct nfit_memdev *nfit_memdev; 1556 struct acpi_nfit_desc *acpi_desc; 1557 struct nvdimm *nvdimm = to_nvdimm(dev); 1558 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1559 1560 nfit_mem = nvdimm_provider_data(nvdimm); 1561 acpi_desc = nfit_mem->acpi_desc; 1562 handle = to_nfit_memdev(dev)->device_handle; 1563 1564 /* assumes DIMMs have at most 2 published interface codes */ 1565 mutex_lock(&acpi_desc->init_mutex); 1566 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1567 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1568 struct nfit_dcr *nfit_dcr; 1569 1570 if (memdev->device_handle != handle) 1571 continue; 1572 1573 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1574 if (nfit_dcr->dcr->region_index != memdev->region_index) 1575 continue; 1576 if (nfit_dcr->dcr->code == dcr->code) 1577 continue; 1578 rc = sprintf(buf, "0x%04x\n", 1579 le16_to_cpu(nfit_dcr->dcr->code)); 1580 break; 1581 } 1582 if (rc != -ENXIO) 1583 break; 1584 } 1585 mutex_unlock(&acpi_desc->init_mutex); 1586 return rc; 1587 } 1588 static DEVICE_ATTR_RO(format1); 1589 1590 static ssize_t formats_show(struct device *dev, 1591 struct device_attribute *attr, char *buf) 1592 { 1593 struct nvdimm *nvdimm = to_nvdimm(dev); 1594 1595 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1596 } 1597 static DEVICE_ATTR_RO(formats); 1598 1599 static ssize_t serial_show(struct device *dev, 1600 struct device_attribute *attr, char *buf) 1601 { 1602 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1603 1604 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1605 } 1606 static DEVICE_ATTR_RO(serial); 1607 1608 static ssize_t family_show(struct device *dev, 1609 struct device_attribute *attr, char *buf) 1610 { 1611 struct nvdimm *nvdimm = to_nvdimm(dev); 1612 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1613 1614 if (nfit_mem->family < 0) 1615 return -ENXIO; 1616 return sprintf(buf, "%d\n", nfit_mem->family); 1617 } 1618 static DEVICE_ATTR_RO(family); 1619 1620 static ssize_t dsm_mask_show(struct device *dev, 1621 struct device_attribute *attr, char *buf) 1622 { 1623 struct nvdimm *nvdimm = to_nvdimm(dev); 1624 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1625 1626 if (nfit_mem->family < 0) 1627 return -ENXIO; 1628 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); 1629 } 1630 static DEVICE_ATTR_RO(dsm_mask); 1631 1632 static ssize_t flags_show(struct device *dev, 1633 struct device_attribute *attr, char *buf) 1634 { 1635 struct nvdimm *nvdimm = to_nvdimm(dev); 1636 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1637 u16 flags = __to_nfit_memdev(nfit_mem)->flags; 1638 1639 if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags)) 1640 flags |= ACPI_NFIT_MEM_FLUSH_FAILED; 1641 1642 return sprintf(buf, "%s%s%s%s%s%s%s\n", 1643 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 1644 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 1645 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 1646 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 1647 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", 1648 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", 1649 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : ""); 1650 } 1651 static DEVICE_ATTR_RO(flags); 1652 1653 static ssize_t id_show(struct device *dev, 1654 struct device_attribute *attr, char *buf) 1655 { 1656 struct nvdimm *nvdimm = to_nvdimm(dev); 1657 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1658 1659 return sprintf(buf, "%s\n", nfit_mem->id); 1660 } 1661 static DEVICE_ATTR_RO(id); 1662 1663 static ssize_t dirty_shutdown_show(struct device *dev, 1664 struct device_attribute *attr, char *buf) 1665 { 1666 struct nvdimm *nvdimm = to_nvdimm(dev); 1667 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1668 1669 return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown); 1670 } 1671 static DEVICE_ATTR_RO(dirty_shutdown); 1672 1673 static struct attribute *acpi_nfit_dimm_attributes[] = { 1674 &dev_attr_handle.attr, 1675 &dev_attr_phys_id.attr, 1676 &dev_attr_vendor.attr, 1677 &dev_attr_device.attr, 1678 &dev_attr_rev_id.attr, 1679 &dev_attr_subsystem_vendor.attr, 1680 &dev_attr_subsystem_device.attr, 1681 &dev_attr_subsystem_rev_id.attr, 1682 &dev_attr_format.attr, 1683 &dev_attr_formats.attr, 1684 &dev_attr_format1.attr, 1685 &dev_attr_serial.attr, 1686 &dev_attr_flags.attr, 1687 &dev_attr_id.attr, 1688 &dev_attr_family.attr, 1689 &dev_attr_dsm_mask.attr, 1690 &dev_attr_dirty_shutdown.attr, 1691 NULL, 1692 }; 1693 1694 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 1695 struct attribute *a, int n) 1696 { 1697 struct device *dev = kobj_to_dev(kobj); 1698 struct nvdimm *nvdimm = to_nvdimm(dev); 1699 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1700 1701 if (!to_nfit_dcr(dev)) { 1702 /* Without a dcr only the memdev attributes can be surfaced */ 1703 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr 1704 || a == &dev_attr_flags.attr 1705 || a == &dev_attr_family.attr 1706 || a == &dev_attr_dsm_mask.attr) 1707 return a->mode; 1708 return 0; 1709 } 1710 1711 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) 1712 return 0; 1713 1714 if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags) 1715 && a == &dev_attr_dirty_shutdown.attr) 1716 return 0; 1717 1718 return a->mode; 1719 } 1720 1721 static const struct attribute_group acpi_nfit_dimm_attribute_group = { 1722 .name = "nfit", 1723 .attrs = acpi_nfit_dimm_attributes, 1724 .is_visible = acpi_nfit_dimm_attr_visible, 1725 }; 1726 1727 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 1728 &acpi_nfit_dimm_attribute_group, 1729 NULL, 1730 }; 1731 1732 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 1733 u32 device_handle) 1734 { 1735 struct nfit_mem *nfit_mem; 1736 1737 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1738 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 1739 return nfit_mem->nvdimm; 1740 1741 return NULL; 1742 } 1743 1744 void __acpi_nvdimm_notify(struct device *dev, u32 event) 1745 { 1746 struct nfit_mem *nfit_mem; 1747 struct acpi_nfit_desc *acpi_desc; 1748 1749 dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev), 1750 event); 1751 1752 if (event != NFIT_NOTIFY_DIMM_HEALTH) { 1753 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), 1754 event); 1755 return; 1756 } 1757 1758 acpi_desc = dev_get_drvdata(dev->parent); 1759 if (!acpi_desc) 1760 return; 1761 1762 /* 1763 * If we successfully retrieved acpi_desc, then we know nfit_mem data 1764 * is still valid. 1765 */ 1766 nfit_mem = dev_get_drvdata(dev); 1767 if (nfit_mem && nfit_mem->flags_attr) 1768 sysfs_notify_dirent(nfit_mem->flags_attr); 1769 } 1770 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); 1771 1772 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) 1773 { 1774 struct acpi_device *adev = data; 1775 struct device *dev = &adev->dev; 1776 1777 nfit_device_lock(dev->parent); 1778 __acpi_nvdimm_notify(dev, event); 1779 nfit_device_unlock(dev->parent); 1780 } 1781 1782 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) 1783 { 1784 acpi_handle handle; 1785 acpi_status status; 1786 1787 status = acpi_get_handle(adev->handle, method, &handle); 1788 1789 if (ACPI_SUCCESS(status)) 1790 return true; 1791 return false; 1792 } 1793 1794 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) 1795 { 1796 struct device *dev = &nfit_mem->adev->dev; 1797 struct nd_intel_smart smart = { 0 }; 1798 union acpi_object in_buf = { 1799 .buffer.type = ACPI_TYPE_BUFFER, 1800 .buffer.length = 0, 1801 }; 1802 union acpi_object in_obj = { 1803 .package.type = ACPI_TYPE_PACKAGE, 1804 .package.count = 1, 1805 .package.elements = &in_buf, 1806 }; 1807 const u8 func = ND_INTEL_SMART; 1808 const guid_t *guid = to_nfit_uuid(nfit_mem->family); 1809 u8 revid = nfit_dsm_revid(nfit_mem->family, func); 1810 struct acpi_device *adev = nfit_mem->adev; 1811 acpi_handle handle = adev->handle; 1812 union acpi_object *out_obj; 1813 1814 if ((nfit_mem->dsm_mask & (1 << func)) == 0) 1815 return; 1816 1817 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); 1818 if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER 1819 || out_obj->buffer.length < sizeof(smart)) { 1820 dev_dbg(dev->parent, "%s: failed to retrieve initial health\n", 1821 dev_name(dev)); 1822 ACPI_FREE(out_obj); 1823 return; 1824 } 1825 memcpy(&smart, out_obj->buffer.pointer, sizeof(smart)); 1826 ACPI_FREE(out_obj); 1827 1828 if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { 1829 if (smart.shutdown_state) 1830 set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags); 1831 } 1832 1833 if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) { 1834 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags); 1835 nfit_mem->dirty_shutdown = smart.shutdown_count; 1836 } 1837 } 1838 1839 static void populate_shutdown_status(struct nfit_mem *nfit_mem) 1840 { 1841 /* 1842 * For DIMMs that provide a dynamic facility to retrieve a 1843 * dirty-shutdown status and/or a dirty-shutdown count, cache 1844 * these values in nfit_mem. 1845 */ 1846 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 1847 nfit_intel_shutdown_status(nfit_mem); 1848 } 1849 1850 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1851 struct nfit_mem *nfit_mem, u32 device_handle) 1852 { 1853 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1854 struct acpi_device *adev, *adev_dimm; 1855 struct device *dev = acpi_desc->dev; 1856 unsigned long dsm_mask, label_mask; 1857 const guid_t *guid; 1858 int i; 1859 int family = -1; 1860 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 1861 1862 /* nfit test assumes 1:1 relationship between commands and dsms */ 1863 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; 1864 nfit_mem->family = NVDIMM_FAMILY_INTEL; 1865 set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); 1866 1867 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) 1868 sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", 1869 be16_to_cpu(dcr->vendor_id), 1870 dcr->manufacturing_location, 1871 be16_to_cpu(dcr->manufacturing_date), 1872 be32_to_cpu(dcr->serial_number)); 1873 else 1874 sprintf(nfit_mem->id, "%04x-%08x", 1875 be16_to_cpu(dcr->vendor_id), 1876 be32_to_cpu(dcr->serial_number)); 1877 1878 adev = to_acpi_dev(acpi_desc); 1879 if (!adev) { 1880 /* unit test case */ 1881 populate_shutdown_status(nfit_mem); 1882 return 0; 1883 } 1884 1885 adev_dimm = acpi_find_child_device(adev, device_handle, false); 1886 nfit_mem->adev = adev_dimm; 1887 if (!adev_dimm) { 1888 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 1889 device_handle); 1890 return force_enable_dimms ? 0 : -ENODEV; 1891 } 1892 1893 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, 1894 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { 1895 dev_err(dev, "%s: notification registration failed\n", 1896 dev_name(&adev_dimm->dev)); 1897 return -ENXIO; 1898 } 1899 /* 1900 * Record nfit_mem for the notification path to track back to 1901 * the nfit sysfs attributes for this dimm device object. 1902 */ 1903 dev_set_drvdata(&adev_dimm->dev, nfit_mem); 1904 1905 /* 1906 * There are 4 "legacy" NVDIMM command sets 1907 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before 1908 * an EFI working group was established to constrain this 1909 * proliferation. The nfit driver probes for the supported command 1910 * set by GUID. Note, if you're a platform developer looking to add 1911 * a new command set to this probe, consider using an existing set, 1912 * or otherwise seek approval to publish the command set at 1913 * http://www.uefi.org/RFIC_LIST. 1914 * 1915 * Note, that checking for function0 (bit0) tells us if any commands 1916 * are reachable through this GUID. 1917 */ 1918 clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); 1919 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) 1920 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) { 1921 set_bit(i, &nd_desc->dimm_family_mask); 1922 if (family < 0 || i == default_dsm_family) 1923 family = i; 1924 } 1925 1926 /* limit the supported commands to those that are publicly documented */ 1927 nfit_mem->family = family; 1928 if (override_dsm_mask && !disable_vendor_specific) 1929 dsm_mask = override_dsm_mask; 1930 else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1931 dsm_mask = NVDIMM_INTEL_CMDMASK; 1932 if (disable_vendor_specific) 1933 dsm_mask &= ~(1 << ND_CMD_VENDOR); 1934 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { 1935 dsm_mask = 0x1c3c76; 1936 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { 1937 dsm_mask = 0x1fe; 1938 if (disable_vendor_specific) 1939 dsm_mask &= ~(1 << 8); 1940 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { 1941 dsm_mask = 0xffffffff; 1942 } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) { 1943 dsm_mask = 0x1f; 1944 } else { 1945 dev_dbg(dev, "unknown dimm command family\n"); 1946 nfit_mem->family = -1; 1947 /* DSMs are optional, continue loading the driver... */ 1948 return 0; 1949 } 1950 1951 /* 1952 * Function 0 is the command interrogation function, don't 1953 * export it to potential userspace use, and enable it to be 1954 * used as an error value in acpi_nfit_ctl(). 1955 */ 1956 dsm_mask &= ~1UL; 1957 1958 guid = to_nfit_uuid(nfit_mem->family); 1959 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1960 if (acpi_check_dsm(adev_dimm->handle, guid, 1961 nfit_dsm_revid(nfit_mem->family, i), 1962 1ULL << i)) 1963 set_bit(i, &nfit_mem->dsm_mask); 1964 1965 /* 1966 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present 1967 * due to their better semantics handling locked capacity. 1968 */ 1969 label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA 1970 | 1 << ND_CMD_SET_CONFIG_DATA; 1971 if (family == NVDIMM_FAMILY_INTEL 1972 && (dsm_mask & label_mask) == label_mask) 1973 /* skip _LS{I,R,W} enabling */; 1974 else { 1975 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1976 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1977 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1978 set_bit(NFIT_MEM_LSR, &nfit_mem->flags); 1979 } 1980 1981 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) 1982 && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { 1983 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); 1984 set_bit(NFIT_MEM_LSW, &nfit_mem->flags); 1985 } 1986 1987 /* 1988 * Quirk read-only label configurations to preserve 1989 * access to label-less namespaces by default. 1990 */ 1991 if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags) 1992 && !force_labels) { 1993 dev_dbg(dev, "%s: No _LSW, disable labels\n", 1994 dev_name(&adev_dimm->dev)); 1995 clear_bit(NFIT_MEM_LSR, &nfit_mem->flags); 1996 } else 1997 dev_dbg(dev, "%s: Force enable labels\n", 1998 dev_name(&adev_dimm->dev)); 1999 } 2000 2001 populate_shutdown_status(nfit_mem); 2002 2003 return 0; 2004 } 2005 2006 static void shutdown_dimm_notify(void *data) 2007 { 2008 struct acpi_nfit_desc *acpi_desc = data; 2009 struct nfit_mem *nfit_mem; 2010 2011 mutex_lock(&acpi_desc->init_mutex); 2012 /* 2013 * Clear out the nfit_mem->flags_attr and shut down dimm event 2014 * notifications. 2015 */ 2016 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 2017 struct acpi_device *adev_dimm = nfit_mem->adev; 2018 2019 if (nfit_mem->flags_attr) { 2020 sysfs_put(nfit_mem->flags_attr); 2021 nfit_mem->flags_attr = NULL; 2022 } 2023 if (adev_dimm) { 2024 acpi_remove_notify_handler(adev_dimm->handle, 2025 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); 2026 dev_set_drvdata(&adev_dimm->dev, NULL); 2027 } 2028 } 2029 mutex_unlock(&acpi_desc->init_mutex); 2030 } 2031 2032 static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) 2033 { 2034 switch (family) { 2035 case NVDIMM_FAMILY_INTEL: 2036 return intel_security_ops; 2037 default: 2038 return NULL; 2039 } 2040 } 2041 2042 static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops( 2043 struct nfit_mem *nfit_mem) 2044 { 2045 unsigned long mask; 2046 struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; 2047 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2048 2049 if (!nd_desc->fw_ops) 2050 return NULL; 2051 2052 if (nfit_mem->family != NVDIMM_FAMILY_INTEL) 2053 return NULL; 2054 2055 mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK; 2056 if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) 2057 return NULL; 2058 2059 return intel_fw_ops; 2060 } 2061 2062 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 2063 { 2064 struct nfit_mem *nfit_mem; 2065 int dimm_count = 0, rc; 2066 struct nvdimm *nvdimm; 2067 2068 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 2069 struct acpi_nfit_flush_address *flush; 2070 unsigned long flags = 0, cmd_mask; 2071 struct nfit_memdev *nfit_memdev; 2072 u32 device_handle; 2073 u16 mem_flags; 2074 2075 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 2076 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 2077 if (nvdimm) { 2078 dimm_count++; 2079 continue; 2080 } 2081 2082 if (nfit_mem->bdw && nfit_mem->memdev_pmem) { 2083 set_bit(NDD_ALIASING, &flags); 2084 set_bit(NDD_LABELING, &flags); 2085 } 2086 2087 /* collate flags across all memdevs for this dimm */ 2088 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2089 struct acpi_nfit_memory_map *dimm_memdev; 2090 2091 dimm_memdev = __to_nfit_memdev(nfit_mem); 2092 if (dimm_memdev->device_handle 2093 != nfit_memdev->memdev->device_handle) 2094 continue; 2095 dimm_memdev->flags |= nfit_memdev->memdev->flags; 2096 } 2097 2098 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 2099 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 2100 set_bit(NDD_UNARMED, &flags); 2101 2102 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 2103 if (rc) 2104 continue; 2105 2106 /* 2107 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL 2108 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the 2109 * userspace interface. 2110 */ 2111 cmd_mask = 1UL << ND_CMD_CALL; 2112 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 2113 /* 2114 * These commands have a 1:1 correspondence 2115 * between DSM payload and libnvdimm ioctl 2116 * payload format. 2117 */ 2118 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 2119 } 2120 2121 /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */ 2122 if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) 2123 set_bit(NDD_NOBLK, &flags); 2124 2125 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { 2126 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 2127 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); 2128 } 2129 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) 2130 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); 2131 2132 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush 2133 : NULL; 2134 nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 2135 acpi_nfit_dimm_attribute_groups, 2136 flags, cmd_mask, flush ? flush->hint_count : 0, 2137 nfit_mem->flush_wpq, &nfit_mem->id[0], 2138 acpi_nfit_get_security_ops(nfit_mem->family), 2139 acpi_nfit_get_fw_ops(nfit_mem)); 2140 if (!nvdimm) 2141 return -ENOMEM; 2142 2143 nfit_mem->nvdimm = nvdimm; 2144 dimm_count++; 2145 2146 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 2147 continue; 2148 2149 dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n", 2150 nvdimm_name(nvdimm), 2151 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 2152 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 2153 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 2154 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", 2155 mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : ""); 2156 2157 } 2158 2159 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 2160 if (rc) 2161 return rc; 2162 2163 /* 2164 * Now that dimms are successfully registered, and async registration 2165 * is flushed, attempt to enable event notification. 2166 */ 2167 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 2168 struct kernfs_node *nfit_kernfs; 2169 2170 nvdimm = nfit_mem->nvdimm; 2171 if (!nvdimm) 2172 continue; 2173 2174 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); 2175 if (nfit_kernfs) 2176 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, 2177 "flags"); 2178 sysfs_put(nfit_kernfs); 2179 if (!nfit_mem->flags_attr) 2180 dev_warn(acpi_desc->dev, "%s: notifications disabled\n", 2181 nvdimm_name(nvdimm)); 2182 } 2183 2184 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, 2185 acpi_desc); 2186 } 2187 2188 /* 2189 * These constants are private because there are no kernel consumers of 2190 * these commands. 2191 */ 2192 enum nfit_aux_cmds { 2193 NFIT_CMD_TRANSLATE_SPA = 5, 2194 NFIT_CMD_ARS_INJECT_SET = 7, 2195 NFIT_CMD_ARS_INJECT_CLEAR = 8, 2196 NFIT_CMD_ARS_INJECT_GET = 9, 2197 }; 2198 2199 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 2200 { 2201 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2202 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); 2203 unsigned long dsm_mask, *mask; 2204 struct acpi_device *adev; 2205 int i; 2206 2207 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); 2208 set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); 2209 2210 /* enable nfit_test to inject bus command emulation */ 2211 if (acpi_desc->bus_cmd_force_en) { 2212 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 2213 mask = &nd_desc->bus_family_mask; 2214 if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { 2215 set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); 2216 nd_desc->fw_ops = intel_bus_fw_ops; 2217 } 2218 } 2219 2220 adev = to_acpi_dev(acpi_desc); 2221 if (!adev) 2222 return; 2223 2224 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 2225 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 2226 set_bit(i, &nd_desc->cmd_mask); 2227 2228 dsm_mask = 2229 (1 << ND_CMD_ARS_CAP) | 2230 (1 << ND_CMD_ARS_START) | 2231 (1 << ND_CMD_ARS_STATUS) | 2232 (1 << ND_CMD_CLEAR_ERROR) | 2233 (1 << NFIT_CMD_TRANSLATE_SPA) | 2234 (1 << NFIT_CMD_ARS_INJECT_SET) | 2235 (1 << NFIT_CMD_ARS_INJECT_CLEAR) | 2236 (1 << NFIT_CMD_ARS_INJECT_GET); 2237 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 2238 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 2239 set_bit(i, &acpi_desc->bus_dsm_mask); 2240 2241 /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */ 2242 dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; 2243 guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL); 2244 mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; 2245 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 2246 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 2247 set_bit(i, mask); 2248 2249 if (*mask == dsm_mask) { 2250 set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask); 2251 nd_desc->fw_ops = intel_bus_fw_ops; 2252 } 2253 } 2254 2255 static ssize_t range_index_show(struct device *dev, 2256 struct device_attribute *attr, char *buf) 2257 { 2258 struct nd_region *nd_region = to_nd_region(dev); 2259 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 2260 2261 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 2262 } 2263 static DEVICE_ATTR_RO(range_index); 2264 2265 static struct attribute *acpi_nfit_region_attributes[] = { 2266 &dev_attr_range_index.attr, 2267 NULL, 2268 }; 2269 2270 static const struct attribute_group acpi_nfit_region_attribute_group = { 2271 .name = "nfit", 2272 .attrs = acpi_nfit_region_attributes, 2273 }; 2274 2275 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 2276 &acpi_nfit_region_attribute_group, 2277 NULL, 2278 }; 2279 2280 /* enough info to uniquely specify an interleave set */ 2281 struct nfit_set_info { 2282 u64 region_offset; 2283 u32 serial_number; 2284 u32 pad; 2285 }; 2286 2287 struct nfit_set_info2 { 2288 u64 region_offset; 2289 u32 serial_number; 2290 u16 vendor_id; 2291 u16 manufacturing_date; 2292 u8 manufacturing_location; 2293 u8 reserved[31]; 2294 }; 2295 2296 static int cmp_map_compat(const void *m0, const void *m1) 2297 { 2298 const struct nfit_set_info *map0 = m0; 2299 const struct nfit_set_info *map1 = m1; 2300 2301 return memcmp(&map0->region_offset, &map1->region_offset, 2302 sizeof(u64)); 2303 } 2304 2305 static int cmp_map(const void *m0, const void *m1) 2306 { 2307 const struct nfit_set_info *map0 = m0; 2308 const struct nfit_set_info *map1 = m1; 2309 2310 if (map0->region_offset < map1->region_offset) 2311 return -1; 2312 else if (map0->region_offset > map1->region_offset) 2313 return 1; 2314 return 0; 2315 } 2316 2317 static int cmp_map2(const void *m0, const void *m1) 2318 { 2319 const struct nfit_set_info2 *map0 = m0; 2320 const struct nfit_set_info2 *map1 = m1; 2321 2322 if (map0->region_offset < map1->region_offset) 2323 return -1; 2324 else if (map0->region_offset > map1->region_offset) 2325 return 1; 2326 return 0; 2327 } 2328 2329 /* Retrieve the nth entry referencing this spa */ 2330 static struct acpi_nfit_memory_map *memdev_from_spa( 2331 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2332 { 2333 struct nfit_memdev *nfit_memdev; 2334 2335 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2336 if (nfit_memdev->memdev->range_index == range_index) 2337 if (n-- == 0) 2338 return nfit_memdev->memdev; 2339 return NULL; 2340 } 2341 2342 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2343 struct nd_region_desc *ndr_desc, 2344 struct acpi_nfit_system_address *spa) 2345 { 2346 struct device *dev = acpi_desc->dev; 2347 struct nd_interleave_set *nd_set; 2348 u16 nr = ndr_desc->num_mappings; 2349 struct nfit_set_info2 *info2; 2350 struct nfit_set_info *info; 2351 int i; 2352 2353 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2354 if (!nd_set) 2355 return -ENOMEM; 2356 import_guid(&nd_set->type_guid, spa->range_guid); 2357 2358 info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL); 2359 if (!info) 2360 return -ENOMEM; 2361 2362 info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL); 2363 if (!info2) 2364 return -ENOMEM; 2365 2366 for (i = 0; i < nr; i++) { 2367 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2368 struct nvdimm *nvdimm = mapping->nvdimm; 2369 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2370 struct nfit_set_info *map = &info[i]; 2371 struct nfit_set_info2 *map2 = &info2[i]; 2372 struct acpi_nfit_memory_map *memdev = 2373 memdev_from_spa(acpi_desc, spa->range_index, i); 2374 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2375 2376 if (!memdev || !nfit_mem->dcr) { 2377 dev_err(dev, "%s: failed to find DCR\n", __func__); 2378 return -ENODEV; 2379 } 2380 2381 map->region_offset = memdev->region_offset; 2382 map->serial_number = dcr->serial_number; 2383 2384 map2->region_offset = memdev->region_offset; 2385 map2->serial_number = dcr->serial_number; 2386 map2->vendor_id = dcr->vendor_id; 2387 map2->manufacturing_date = dcr->manufacturing_date; 2388 map2->manufacturing_location = dcr->manufacturing_location; 2389 } 2390 2391 /* v1.1 namespaces */ 2392 sort(info, nr, sizeof(*info), cmp_map, NULL); 2393 nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0); 2394 2395 /* v1.2 namespaces */ 2396 sort(info2, nr, sizeof(*info2), cmp_map2, NULL); 2397 nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0); 2398 2399 /* support v1.1 namespaces created with the wrong sort order */ 2400 sort(info, nr, sizeof(*info), cmp_map_compat, NULL); 2401 nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0); 2402 2403 /* record the result of the sort for the mapping position */ 2404 for (i = 0; i < nr; i++) { 2405 struct nfit_set_info2 *map2 = &info2[i]; 2406 int j; 2407 2408 for (j = 0; j < nr; j++) { 2409 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2410 struct nvdimm *nvdimm = mapping->nvdimm; 2411 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2412 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2413 2414 if (map2->serial_number == dcr->serial_number && 2415 map2->vendor_id == dcr->vendor_id && 2416 map2->manufacturing_date == dcr->manufacturing_date && 2417 map2->manufacturing_location 2418 == dcr->manufacturing_location) { 2419 mapping->position = i; 2420 break; 2421 } 2422 } 2423 } 2424 2425 ndr_desc->nd_set = nd_set; 2426 devm_kfree(dev, info); 2427 devm_kfree(dev, info2); 2428 2429 return 0; 2430 } 2431 2432 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2433 { 2434 struct acpi_nfit_interleave *idt = mmio->idt; 2435 u32 sub_line_offset, line_index, line_offset; 2436 u64 line_no, table_skip_count, table_offset; 2437 2438 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2439 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2440 line_offset = idt->line_offset[line_index] 2441 * mmio->line_size; 2442 table_offset = table_skip_count * mmio->table_size; 2443 2444 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2445 } 2446 2447 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2448 { 2449 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2450 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2451 const u32 STATUS_MASK = 0x80000037; 2452 2453 if (mmio->num_lines) 2454 offset = to_interleave_offset(offset, mmio); 2455 2456 return readl(mmio->addr.base + offset) & STATUS_MASK; 2457 } 2458 2459 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2460 resource_size_t dpa, unsigned int len, unsigned int write) 2461 { 2462 u64 cmd, offset; 2463 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2464 2465 enum { 2466 BCW_OFFSET_MASK = (1ULL << 48)-1, 2467 BCW_LEN_SHIFT = 48, 2468 BCW_LEN_MASK = (1ULL << 8) - 1, 2469 BCW_CMD_SHIFT = 56, 2470 }; 2471 2472 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2473 len = len >> L1_CACHE_SHIFT; 2474 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2475 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2476 2477 offset = nfit_blk->cmd_offset + mmio->size * bw; 2478 if (mmio->num_lines) 2479 offset = to_interleave_offset(offset, mmio); 2480 2481 writeq(cmd, mmio->addr.base + offset); 2482 nvdimm_flush(nfit_blk->nd_region, NULL); 2483 2484 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2485 readq(mmio->addr.base + offset); 2486 } 2487 2488 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2489 resource_size_t dpa, void *iobuf, size_t len, int rw, 2490 unsigned int lane) 2491 { 2492 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2493 unsigned int copied = 0; 2494 u64 base_offset; 2495 int rc; 2496 2497 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2498 + lane * mmio->size; 2499 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2500 while (len) { 2501 unsigned int c; 2502 u64 offset; 2503 2504 if (mmio->num_lines) { 2505 u32 line_offset; 2506 2507 offset = to_interleave_offset(base_offset + copied, 2508 mmio); 2509 div_u64_rem(offset, mmio->line_size, &line_offset); 2510 c = min_t(size_t, len, mmio->line_size - line_offset); 2511 } else { 2512 offset = base_offset + nfit_blk->bdw_offset; 2513 c = len; 2514 } 2515 2516 if (rw) 2517 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2518 else { 2519 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2520 arch_invalidate_pmem((void __force *) 2521 mmio->addr.aperture + offset, c); 2522 2523 memcpy(iobuf + copied, mmio->addr.aperture + offset, c); 2524 } 2525 2526 copied += c; 2527 len -= c; 2528 } 2529 2530 if (rw) 2531 nvdimm_flush(nfit_blk->nd_region, NULL); 2532 2533 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2534 return rc; 2535 } 2536 2537 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2538 resource_size_t dpa, void *iobuf, u64 len, int rw) 2539 { 2540 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2541 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2542 struct nd_region *nd_region = nfit_blk->nd_region; 2543 unsigned int lane, copied = 0; 2544 int rc = 0; 2545 2546 lane = nd_region_acquire_lane(nd_region); 2547 while (len) { 2548 u64 c = min(len, mmio->size); 2549 2550 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2551 iobuf + copied, c, rw, lane); 2552 if (rc) 2553 break; 2554 2555 copied += c; 2556 len -= c; 2557 } 2558 nd_region_release_lane(nd_region, lane); 2559 2560 return rc; 2561 } 2562 2563 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2564 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2565 { 2566 if (idt) { 2567 mmio->num_lines = idt->line_count; 2568 mmio->line_size = idt->line_size; 2569 if (interleave_ways == 0) 2570 return -ENXIO; 2571 mmio->table_size = mmio->num_lines * interleave_ways 2572 * mmio->line_size; 2573 } 2574 2575 return 0; 2576 } 2577 2578 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2579 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2580 { 2581 struct nd_cmd_dimm_flags flags; 2582 int rc; 2583 2584 memset(&flags, 0, sizeof(flags)); 2585 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2586 sizeof(flags), NULL); 2587 2588 if (rc >= 0 && flags.status == 0) 2589 nfit_blk->dimm_flags = flags.flags; 2590 else if (rc == -ENOTTY) { 2591 /* fall back to a conservative default */ 2592 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2593 rc = 0; 2594 } else 2595 rc = -ENXIO; 2596 2597 return rc; 2598 } 2599 2600 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2601 struct device *dev) 2602 { 2603 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2604 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2605 struct nfit_blk_mmio *mmio; 2606 struct nfit_blk *nfit_blk; 2607 struct nfit_mem *nfit_mem; 2608 struct nvdimm *nvdimm; 2609 int rc; 2610 2611 nvdimm = nd_blk_region_to_dimm(ndbr); 2612 nfit_mem = nvdimm_provider_data(nvdimm); 2613 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2614 dev_dbg(dev, "missing%s%s%s\n", 2615 nfit_mem ? "" : " nfit_mem", 2616 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2617 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 2618 return -ENXIO; 2619 } 2620 2621 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2622 if (!nfit_blk) 2623 return -ENOMEM; 2624 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2625 nfit_blk->nd_region = to_nd_region(dev); 2626 2627 /* map block aperture memory */ 2628 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2629 mmio = &nfit_blk->mmio[BDW]; 2630 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2631 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2632 if (!mmio->addr.base) { 2633 dev_dbg(dev, "%s failed to map bdw\n", 2634 nvdimm_name(nvdimm)); 2635 return -ENOMEM; 2636 } 2637 mmio->size = nfit_mem->bdw->size; 2638 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2639 mmio->idt = nfit_mem->idt_bdw; 2640 mmio->spa = nfit_mem->spa_bdw; 2641 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2642 nfit_mem->memdev_bdw->interleave_ways); 2643 if (rc) { 2644 dev_dbg(dev, "%s failed to init bdw interleave\n", 2645 nvdimm_name(nvdimm)); 2646 return rc; 2647 } 2648 2649 /* map block control memory */ 2650 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2651 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2652 mmio = &nfit_blk->mmio[DCR]; 2653 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2654 nfit_mem->spa_dcr->length); 2655 if (!mmio->addr.base) { 2656 dev_dbg(dev, "%s failed to map dcr\n", 2657 nvdimm_name(nvdimm)); 2658 return -ENOMEM; 2659 } 2660 mmio->size = nfit_mem->dcr->window_size; 2661 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2662 mmio->idt = nfit_mem->idt_dcr; 2663 mmio->spa = nfit_mem->spa_dcr; 2664 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2665 nfit_mem->memdev_dcr->interleave_ways); 2666 if (rc) { 2667 dev_dbg(dev, "%s failed to init dcr interleave\n", 2668 nvdimm_name(nvdimm)); 2669 return rc; 2670 } 2671 2672 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2673 if (rc < 0) { 2674 dev_dbg(dev, "%s failed get DIMM flags\n", 2675 nvdimm_name(nvdimm)); 2676 return rc; 2677 } 2678 2679 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2680 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2681 2682 if (mmio->line_size == 0) 2683 return 0; 2684 2685 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2686 + 8 > mmio->line_size) { 2687 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2688 return -ENXIO; 2689 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2690 + 8 > mmio->line_size) { 2691 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2692 return -ENXIO; 2693 } 2694 2695 return 0; 2696 } 2697 2698 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2699 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2700 { 2701 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2702 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2703 int cmd_rc, rc; 2704 2705 cmd->address = spa->address; 2706 cmd->length = spa->length; 2707 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2708 sizeof(*cmd), &cmd_rc); 2709 if (rc < 0) 2710 return rc; 2711 return cmd_rc; 2712 } 2713 2714 static int ars_start(struct acpi_nfit_desc *acpi_desc, 2715 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) 2716 { 2717 int rc; 2718 int cmd_rc; 2719 struct nd_cmd_ars_start ars_start; 2720 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2721 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2722 2723 memset(&ars_start, 0, sizeof(ars_start)); 2724 ars_start.address = spa->address; 2725 ars_start.length = spa->length; 2726 if (req_type == ARS_REQ_SHORT) 2727 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2728 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2729 ars_start.type = ND_ARS_PERSISTENT; 2730 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2731 ars_start.type = ND_ARS_VOLATILE; 2732 else 2733 return -ENOTTY; 2734 2735 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2736 sizeof(ars_start), &cmd_rc); 2737 2738 if (rc < 0) 2739 return rc; 2740 if (cmd_rc < 0) 2741 return cmd_rc; 2742 set_bit(ARS_VALID, &acpi_desc->scrub_flags); 2743 return 0; 2744 } 2745 2746 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2747 { 2748 int rc, cmd_rc; 2749 struct nd_cmd_ars_start ars_start; 2750 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2751 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2752 2753 ars_start = (struct nd_cmd_ars_start) { 2754 .address = ars_status->restart_address, 2755 .length = ars_status->restart_length, 2756 .type = ars_status->type, 2757 }; 2758 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2759 sizeof(ars_start), &cmd_rc); 2760 if (rc < 0) 2761 return rc; 2762 return cmd_rc; 2763 } 2764 2765 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2766 { 2767 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2768 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2769 int rc, cmd_rc; 2770 2771 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2772 acpi_desc->max_ars, &cmd_rc); 2773 if (rc < 0) 2774 return rc; 2775 return cmd_rc; 2776 } 2777 2778 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2779 struct nfit_spa *nfit_spa) 2780 { 2781 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2782 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2783 struct nd_region *nd_region = nfit_spa->nd_region; 2784 struct device *dev; 2785 2786 lockdep_assert_held(&acpi_desc->init_mutex); 2787 /* 2788 * Only advance the ARS state for ARS runs initiated by the 2789 * kernel, ignore ARS results from BIOS initiated runs for scrub 2790 * completion tracking. 2791 */ 2792 if (acpi_desc->scrub_spa != nfit_spa) 2793 return; 2794 2795 if ((ars_status->address >= spa->address && ars_status->address 2796 < spa->address + spa->length) 2797 || (ars_status->address < spa->address)) { 2798 /* 2799 * Assume that if a scrub starts at an offset from the 2800 * start of nfit_spa that we are in the continuation 2801 * case. 2802 * 2803 * Otherwise, if the scrub covers the spa range, mark 2804 * any pending request complete. 2805 */ 2806 if (ars_status->address + ars_status->length 2807 >= spa->address + spa->length) 2808 /* complete */; 2809 else 2810 return; 2811 } else 2812 return; 2813 2814 acpi_desc->scrub_spa = NULL; 2815 if (nd_region) { 2816 dev = nd_region_dev(nd_region); 2817 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2818 } else 2819 dev = acpi_desc->dev; 2820 dev_dbg(dev, "ARS: range %d complete\n", spa->range_index); 2821 } 2822 2823 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2824 { 2825 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2826 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2827 int rc; 2828 u32 i; 2829 2830 /* 2831 * First record starts at 44 byte offset from the start of the 2832 * payload. 2833 */ 2834 if (ars_status->out_length < 44) 2835 return 0; 2836 2837 /* 2838 * Ignore potentially stale results that are only refreshed 2839 * after a start-ARS event. 2840 */ 2841 if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) { 2842 dev_dbg(acpi_desc->dev, "skip %d stale records\n", 2843 ars_status->num_records); 2844 return 0; 2845 } 2846 2847 for (i = 0; i < ars_status->num_records; i++) { 2848 /* only process full records */ 2849 if (ars_status->out_length 2850 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2851 break; 2852 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2853 ars_status->records[i].err_address, 2854 ars_status->records[i].length); 2855 if (rc) 2856 return rc; 2857 } 2858 if (i < ars_status->num_records) 2859 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2860 2861 return 0; 2862 } 2863 2864 static void acpi_nfit_remove_resource(void *data) 2865 { 2866 struct resource *res = data; 2867 2868 remove_resource(res); 2869 } 2870 2871 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2872 struct nd_region_desc *ndr_desc) 2873 { 2874 struct resource *res, *nd_res = ndr_desc->res; 2875 int is_pmem, ret; 2876 2877 /* No operation if the region is already registered as PMEM */ 2878 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2879 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2880 if (is_pmem == REGION_INTERSECTS) 2881 return 0; 2882 2883 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2884 if (!res) 2885 return -ENOMEM; 2886 2887 res->name = "Persistent Memory"; 2888 res->start = nd_res->start; 2889 res->end = nd_res->end; 2890 res->flags = IORESOURCE_MEM; 2891 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2892 2893 ret = insert_resource(&iomem_resource, res); 2894 if (ret) 2895 return ret; 2896 2897 ret = devm_add_action_or_reset(acpi_desc->dev, 2898 acpi_nfit_remove_resource, 2899 res); 2900 if (ret) 2901 return ret; 2902 2903 return 0; 2904 } 2905 2906 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2907 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2908 struct acpi_nfit_memory_map *memdev, 2909 struct nfit_spa *nfit_spa) 2910 { 2911 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2912 memdev->device_handle); 2913 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2914 struct nd_blk_region_desc *ndbr_desc; 2915 struct nfit_mem *nfit_mem; 2916 int rc; 2917 2918 if (!nvdimm) { 2919 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2920 spa->range_index, memdev->device_handle); 2921 return -ENODEV; 2922 } 2923 2924 mapping->nvdimm = nvdimm; 2925 switch (nfit_spa_type(spa)) { 2926 case NFIT_SPA_PM: 2927 case NFIT_SPA_VOLATILE: 2928 mapping->start = memdev->address; 2929 mapping->size = memdev->region_size; 2930 break; 2931 case NFIT_SPA_DCR: 2932 nfit_mem = nvdimm_provider_data(nvdimm); 2933 if (!nfit_mem || !nfit_mem->bdw) { 2934 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2935 spa->range_index, nvdimm_name(nvdimm)); 2936 break; 2937 } 2938 2939 mapping->size = nfit_mem->bdw->capacity; 2940 mapping->start = nfit_mem->bdw->start_address; 2941 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2942 ndr_desc->mapping = mapping; 2943 ndr_desc->num_mappings = 1; 2944 ndbr_desc = to_blk_region_desc(ndr_desc); 2945 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2946 ndbr_desc->do_io = acpi_desc->blk_do_io; 2947 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2948 if (rc) 2949 return rc; 2950 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2951 ndr_desc); 2952 if (!nfit_spa->nd_region) 2953 return -ENOMEM; 2954 break; 2955 } 2956 2957 return 0; 2958 } 2959 2960 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2961 { 2962 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2963 nfit_spa_type(spa) == NFIT_SPA_VCD || 2964 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2965 nfit_spa_type(spa) == NFIT_SPA_PCD); 2966 } 2967 2968 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2969 { 2970 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2971 nfit_spa_type(spa) == NFIT_SPA_VCD || 2972 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2973 } 2974 2975 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2976 struct nfit_spa *nfit_spa) 2977 { 2978 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2979 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2980 struct nd_blk_region_desc ndbr_desc; 2981 struct nd_region_desc *ndr_desc; 2982 struct nfit_memdev *nfit_memdev; 2983 struct nvdimm_bus *nvdimm_bus; 2984 struct resource res; 2985 int count = 0, rc; 2986 2987 if (nfit_spa->nd_region) 2988 return 0; 2989 2990 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2991 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2992 return 0; 2993 } 2994 2995 memset(&res, 0, sizeof(res)); 2996 memset(&mappings, 0, sizeof(mappings)); 2997 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2998 res.start = spa->address; 2999 res.end = res.start + spa->length - 1; 3000 ndr_desc = &ndbr_desc.ndr_desc; 3001 ndr_desc->res = &res; 3002 ndr_desc->provider_data = nfit_spa; 3003 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 3004 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) { 3005 ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain); 3006 ndr_desc->target_node = pxm_to_node(spa->proximity_domain); 3007 } else { 3008 ndr_desc->numa_node = NUMA_NO_NODE; 3009 ndr_desc->target_node = NUMA_NO_NODE; 3010 } 3011 3012 /* Fallback to address based numa information if node lookup failed */ 3013 if (ndr_desc->numa_node == NUMA_NO_NODE) { 3014 ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address); 3015 dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]", 3016 NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); 3017 } 3018 if (ndr_desc->target_node == NUMA_NO_NODE) { 3019 ndr_desc->target_node = phys_to_target_node(spa->address); 3020 dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", 3021 NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); 3022 } 3023 3024 /* 3025 * Persistence domain bits are hierarchical, if 3026 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 3027 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 3028 */ 3029 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 3030 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 3031 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 3032 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 3033 3034 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 3035 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 3036 struct nd_mapping_desc *mapping; 3037 3038 /* range index 0 == unmapped in SPA or invalid-SPA */ 3039 if (memdev->range_index == 0 || spa->range_index == 0) 3040 continue; 3041 if (memdev->range_index != spa->range_index) 3042 continue; 3043 if (count >= ND_MAX_MAPPINGS) { 3044 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 3045 spa->range_index, ND_MAX_MAPPINGS); 3046 return -ENXIO; 3047 } 3048 mapping = &mappings[count++]; 3049 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 3050 memdev, nfit_spa); 3051 if (rc) 3052 goto out; 3053 } 3054 3055 ndr_desc->mapping = mappings; 3056 ndr_desc->num_mappings = count; 3057 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 3058 if (rc) 3059 goto out; 3060 3061 nvdimm_bus = acpi_desc->nvdimm_bus; 3062 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 3063 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 3064 if (rc) { 3065 dev_warn(acpi_desc->dev, 3066 "failed to insert pmem resource to iomem: %d\n", 3067 rc); 3068 goto out; 3069 } 3070 3071 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 3072 ndr_desc); 3073 if (!nfit_spa->nd_region) 3074 rc = -ENOMEM; 3075 } else if (nfit_spa_is_volatile(spa)) { 3076 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 3077 ndr_desc); 3078 if (!nfit_spa->nd_region) 3079 rc = -ENOMEM; 3080 } else if (nfit_spa_is_virtual(spa)) { 3081 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 3082 ndr_desc); 3083 if (!nfit_spa->nd_region) 3084 rc = -ENOMEM; 3085 } 3086 3087 out: 3088 if (rc) 3089 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 3090 nfit_spa->spa->range_index); 3091 return rc; 3092 } 3093 3094 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 3095 { 3096 struct device *dev = acpi_desc->dev; 3097 struct nd_cmd_ars_status *ars_status; 3098 3099 if (acpi_desc->ars_status) { 3100 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 3101 return 0; 3102 } 3103 3104 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 3105 if (!ars_status) 3106 return -ENOMEM; 3107 acpi_desc->ars_status = ars_status; 3108 return 0; 3109 } 3110 3111 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 3112 { 3113 int rc; 3114 3115 if (ars_status_alloc(acpi_desc)) 3116 return -ENOMEM; 3117 3118 rc = ars_get_status(acpi_desc); 3119 3120 if (rc < 0 && rc != -ENOSPC) 3121 return rc; 3122 3123 if (ars_status_process_records(acpi_desc)) 3124 dev_err(acpi_desc->dev, "Failed to process ARS records\n"); 3125 3126 return rc; 3127 } 3128 3129 static int ars_register(struct acpi_nfit_desc *acpi_desc, 3130 struct nfit_spa *nfit_spa) 3131 { 3132 int rc; 3133 3134 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3135 return acpi_nfit_register_region(acpi_desc, nfit_spa); 3136 3137 set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 3138 if (!no_init_ars) 3139 set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); 3140 3141 switch (acpi_nfit_query_poison(acpi_desc)) { 3142 case 0: 3143 case -ENOSPC: 3144 case -EAGAIN: 3145 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); 3146 /* shouldn't happen, try again later */ 3147 if (rc == -EBUSY) 3148 break; 3149 if (rc) { 3150 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3151 break; 3152 } 3153 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 3154 rc = acpi_nfit_query_poison(acpi_desc); 3155 if (rc) 3156 break; 3157 acpi_desc->scrub_spa = nfit_spa; 3158 ars_complete(acpi_desc, nfit_spa); 3159 /* 3160 * If ars_complete() says we didn't complete the 3161 * short scrub, we'll try again with a long 3162 * request. 3163 */ 3164 acpi_desc->scrub_spa = NULL; 3165 break; 3166 case -EBUSY: 3167 case -ENOMEM: 3168 /* 3169 * BIOS was using ARS, wait for it to complete (or 3170 * resources to become available) and then perform our 3171 * own scrubs. 3172 */ 3173 break; 3174 default: 3175 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3176 break; 3177 } 3178 3179 return acpi_nfit_register_region(acpi_desc, nfit_spa); 3180 } 3181 3182 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 3183 { 3184 struct nfit_spa *nfit_spa; 3185 3186 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3187 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3188 continue; 3189 ars_complete(acpi_desc, nfit_spa); 3190 } 3191 } 3192 3193 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 3194 int query_rc) 3195 { 3196 unsigned int tmo = acpi_desc->scrub_tmo; 3197 struct device *dev = acpi_desc->dev; 3198 struct nfit_spa *nfit_spa; 3199 3200 lockdep_assert_held(&acpi_desc->init_mutex); 3201 3202 if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) 3203 return 0; 3204 3205 if (query_rc == -EBUSY) { 3206 dev_dbg(dev, "ARS: ARS busy\n"); 3207 return min(30U * 60U, tmo * 2); 3208 } 3209 if (query_rc == -ENOSPC) { 3210 dev_dbg(dev, "ARS: ARS continue\n"); 3211 ars_continue(acpi_desc); 3212 return 1; 3213 } 3214 if (query_rc && query_rc != -EAGAIN) { 3215 unsigned long long addr, end; 3216 3217 addr = acpi_desc->ars_status->address; 3218 end = addr + acpi_desc->ars_status->length; 3219 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 3220 query_rc); 3221 } 3222 3223 ars_complete_all(acpi_desc); 3224 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3225 enum nfit_ars_state req_type; 3226 int rc; 3227 3228 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3229 continue; 3230 3231 /* prefer short ARS requests first */ 3232 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) 3233 req_type = ARS_REQ_SHORT; 3234 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) 3235 req_type = ARS_REQ_LONG; 3236 else 3237 continue; 3238 rc = ars_start(acpi_desc, nfit_spa, req_type); 3239 3240 dev = nd_region_dev(nfit_spa->nd_region); 3241 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n", 3242 nfit_spa->spa->range_index, 3243 req_type == ARS_REQ_SHORT ? "short" : "long", 3244 rc); 3245 /* 3246 * Hmm, we raced someone else starting ARS? Try again in 3247 * a bit. 3248 */ 3249 if (rc == -EBUSY) 3250 return 1; 3251 if (rc == 0) { 3252 dev_WARN_ONCE(dev, acpi_desc->scrub_spa, 3253 "scrub start while range %d active\n", 3254 acpi_desc->scrub_spa->spa->range_index); 3255 clear_bit(req_type, &nfit_spa->ars_state); 3256 acpi_desc->scrub_spa = nfit_spa; 3257 /* 3258 * Consider this spa last for future scrub 3259 * requests 3260 */ 3261 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 3262 return 1; 3263 } 3264 3265 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 3266 nfit_spa->spa->range_index, rc); 3267 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3268 } 3269 return 0; 3270 } 3271 3272 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) 3273 { 3274 lockdep_assert_held(&acpi_desc->init_mutex); 3275 3276 set_bit(ARS_BUSY, &acpi_desc->scrub_flags); 3277 /* note this should only be set from within the workqueue */ 3278 if (tmo) 3279 acpi_desc->scrub_tmo = tmo; 3280 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 3281 } 3282 3283 static void sched_ars(struct acpi_nfit_desc *acpi_desc) 3284 { 3285 __sched_ars(acpi_desc, 0); 3286 } 3287 3288 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) 3289 { 3290 lockdep_assert_held(&acpi_desc->init_mutex); 3291 3292 clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); 3293 acpi_desc->scrub_count++; 3294 if (acpi_desc->scrub_count_state) 3295 sysfs_notify_dirent(acpi_desc->scrub_count_state); 3296 } 3297 3298 static void acpi_nfit_scrub(struct work_struct *work) 3299 { 3300 struct acpi_nfit_desc *acpi_desc; 3301 unsigned int tmo; 3302 int query_rc; 3303 3304 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 3305 mutex_lock(&acpi_desc->init_mutex); 3306 query_rc = acpi_nfit_query_poison(acpi_desc); 3307 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 3308 if (tmo) 3309 __sched_ars(acpi_desc, tmo); 3310 else 3311 notify_ars_done(acpi_desc); 3312 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 3313 clear_bit(ARS_POLL, &acpi_desc->scrub_flags); 3314 mutex_unlock(&acpi_desc->init_mutex); 3315 } 3316 3317 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 3318 struct nfit_spa *nfit_spa) 3319 { 3320 int type = nfit_spa_type(nfit_spa->spa); 3321 struct nd_cmd_ars_cap ars_cap; 3322 int rc; 3323 3324 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3325 memset(&ars_cap, 0, sizeof(ars_cap)); 3326 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 3327 if (rc < 0) 3328 return; 3329 /* check that the supported scrub types match the spa type */ 3330 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 3331 & ND_ARS_VOLATILE) == 0) 3332 return; 3333 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 3334 & ND_ARS_PERSISTENT) == 0) 3335 return; 3336 3337 nfit_spa->max_ars = ars_cap.max_ars_out; 3338 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 3339 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 3340 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 3341 } 3342 3343 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 3344 { 3345 struct nfit_spa *nfit_spa; 3346 int rc, do_sched_ars = 0; 3347 3348 set_bit(ARS_VALID, &acpi_desc->scrub_flags); 3349 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3350 switch (nfit_spa_type(nfit_spa->spa)) { 3351 case NFIT_SPA_VOLATILE: 3352 case NFIT_SPA_PM: 3353 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3354 break; 3355 } 3356 } 3357 3358 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3359 switch (nfit_spa_type(nfit_spa->spa)) { 3360 case NFIT_SPA_VOLATILE: 3361 case NFIT_SPA_PM: 3362 /* register regions and kick off initial ARS run */ 3363 rc = ars_register(acpi_desc, nfit_spa); 3364 if (rc) 3365 return rc; 3366 3367 /* 3368 * Kick off background ARS if at least one 3369 * region successfully registered ARS 3370 */ 3371 if (!test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3372 do_sched_ars++; 3373 break; 3374 case NFIT_SPA_BDW: 3375 /* nothing to register */ 3376 break; 3377 case NFIT_SPA_DCR: 3378 case NFIT_SPA_VDISK: 3379 case NFIT_SPA_VCD: 3380 case NFIT_SPA_PDISK: 3381 case NFIT_SPA_PCD: 3382 /* register known regions that don't support ARS */ 3383 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3384 if (rc) 3385 return rc; 3386 break; 3387 default: 3388 /* don't register unknown regions */ 3389 break; 3390 } 3391 } 3392 3393 if (do_sched_ars) 3394 sched_ars(acpi_desc); 3395 return 0; 3396 } 3397 3398 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3399 struct nfit_table_prev *prev) 3400 { 3401 struct device *dev = acpi_desc->dev; 3402 3403 if (!list_empty(&prev->spas) || 3404 !list_empty(&prev->memdevs) || 3405 !list_empty(&prev->dcrs) || 3406 !list_empty(&prev->bdws) || 3407 !list_empty(&prev->idts) || 3408 !list_empty(&prev->flushes)) { 3409 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3410 return -ENXIO; 3411 } 3412 return 0; 3413 } 3414 3415 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3416 { 3417 struct device *dev = acpi_desc->dev; 3418 struct kernfs_node *nfit; 3419 struct device *bus_dev; 3420 3421 if (!ars_supported(acpi_desc->nvdimm_bus)) 3422 return 0; 3423 3424 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3425 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3426 if (!nfit) { 3427 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3428 return -ENODEV; 3429 } 3430 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3431 sysfs_put(nfit); 3432 if (!acpi_desc->scrub_count_state) { 3433 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3434 return -ENODEV; 3435 } 3436 3437 return 0; 3438 } 3439 3440 static void acpi_nfit_unregister(void *data) 3441 { 3442 struct acpi_nfit_desc *acpi_desc = data; 3443 3444 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3445 } 3446 3447 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3448 { 3449 struct device *dev = acpi_desc->dev; 3450 struct nfit_table_prev prev; 3451 const void *end; 3452 int rc; 3453 3454 if (!acpi_desc->nvdimm_bus) { 3455 acpi_nfit_init_dsms(acpi_desc); 3456 3457 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3458 &acpi_desc->nd_desc); 3459 if (!acpi_desc->nvdimm_bus) 3460 return -ENOMEM; 3461 3462 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3463 acpi_desc); 3464 if (rc) 3465 return rc; 3466 3467 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3468 if (rc) 3469 return rc; 3470 3471 /* register this acpi_desc for mce notifications */ 3472 mutex_lock(&acpi_desc_lock); 3473 list_add_tail(&acpi_desc->list, &acpi_descs); 3474 mutex_unlock(&acpi_desc_lock); 3475 } 3476 3477 mutex_lock(&acpi_desc->init_mutex); 3478 3479 INIT_LIST_HEAD(&prev.spas); 3480 INIT_LIST_HEAD(&prev.memdevs); 3481 INIT_LIST_HEAD(&prev.dcrs); 3482 INIT_LIST_HEAD(&prev.bdws); 3483 INIT_LIST_HEAD(&prev.idts); 3484 INIT_LIST_HEAD(&prev.flushes); 3485 3486 list_cut_position(&prev.spas, &acpi_desc->spas, 3487 acpi_desc->spas.prev); 3488 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3489 acpi_desc->memdevs.prev); 3490 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 3491 acpi_desc->dcrs.prev); 3492 list_cut_position(&prev.bdws, &acpi_desc->bdws, 3493 acpi_desc->bdws.prev); 3494 list_cut_position(&prev.idts, &acpi_desc->idts, 3495 acpi_desc->idts.prev); 3496 list_cut_position(&prev.flushes, &acpi_desc->flushes, 3497 acpi_desc->flushes.prev); 3498 3499 end = data + sz; 3500 while (!IS_ERR_OR_NULL(data)) 3501 data = add_table(acpi_desc, &prev, data, end); 3502 3503 if (IS_ERR(data)) { 3504 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data)); 3505 rc = PTR_ERR(data); 3506 goto out_unlock; 3507 } 3508 3509 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 3510 if (rc) 3511 goto out_unlock; 3512 3513 rc = nfit_mem_init(acpi_desc); 3514 if (rc) 3515 goto out_unlock; 3516 3517 rc = acpi_nfit_register_dimms(acpi_desc); 3518 if (rc) 3519 goto out_unlock; 3520 3521 rc = acpi_nfit_register_regions(acpi_desc); 3522 3523 out_unlock: 3524 mutex_unlock(&acpi_desc->init_mutex); 3525 return rc; 3526 } 3527 EXPORT_SYMBOL_GPL(acpi_nfit_init); 3528 3529 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3530 { 3531 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 3532 struct device *dev = acpi_desc->dev; 3533 3534 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3535 nfit_device_lock(dev); 3536 nfit_device_unlock(dev); 3537 3538 /* Bounce the init_mutex to complete initial registration */ 3539 mutex_lock(&acpi_desc->init_mutex); 3540 mutex_unlock(&acpi_desc->init_mutex); 3541 3542 return 0; 3543 } 3544 3545 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3546 struct nvdimm *nvdimm, unsigned int cmd) 3547 { 3548 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 3549 3550 if (nvdimm) 3551 return 0; 3552 if (cmd != ND_CMD_ARS_START) 3553 return 0; 3554 3555 /* 3556 * The kernel and userspace may race to initiate a scrub, but 3557 * the scrub thread is prepared to lose that initial race. It 3558 * just needs guarantees that any ARS it initiates are not 3559 * interrupted by any intervening start requests from userspace. 3560 */ 3561 if (work_busy(&acpi_desc->dwork.work)) 3562 return -EBUSY; 3563 3564 return 0; 3565 } 3566 3567 /* 3568 * Prevent security and firmware activate commands from being issued via 3569 * ioctl. 3570 */ 3571 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3572 struct nvdimm *nvdimm, unsigned int cmd, void *buf) 3573 { 3574 struct nd_cmd_pkg *call_pkg = buf; 3575 unsigned int func; 3576 3577 if (nvdimm && cmd == ND_CMD_CALL && 3578 call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { 3579 func = call_pkg->nd_command; 3580 if (func > NVDIMM_CMD_MAX || 3581 (1 << func) & NVDIMM_INTEL_DENY_CMDMASK) 3582 return -EOPNOTSUPP; 3583 } 3584 3585 /* block all non-nfit bus commands */ 3586 if (!nvdimm && cmd == ND_CMD_CALL && 3587 call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT) 3588 return -EOPNOTSUPP; 3589 3590 return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd); 3591 } 3592 3593 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, 3594 enum nfit_ars_state req_type) 3595 { 3596 struct device *dev = acpi_desc->dev; 3597 int scheduled = 0, busy = 0; 3598 struct nfit_spa *nfit_spa; 3599 3600 mutex_lock(&acpi_desc->init_mutex); 3601 if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { 3602 mutex_unlock(&acpi_desc->init_mutex); 3603 return 0; 3604 } 3605 3606 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3607 int type = nfit_spa_type(nfit_spa->spa); 3608 3609 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3610 continue; 3611 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3612 continue; 3613 3614 if (test_and_set_bit(req_type, &nfit_spa->ars_state)) 3615 busy++; 3616 else 3617 scheduled++; 3618 } 3619 if (scheduled) { 3620 sched_ars(acpi_desc); 3621 dev_dbg(dev, "ars_scan triggered\n"); 3622 } 3623 mutex_unlock(&acpi_desc->init_mutex); 3624 3625 if (scheduled) 3626 return 0; 3627 if (busy) 3628 return -EBUSY; 3629 return -ENOTTY; 3630 } 3631 3632 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3633 { 3634 struct nvdimm_bus_descriptor *nd_desc; 3635 3636 dev_set_drvdata(dev, acpi_desc); 3637 acpi_desc->dev = dev; 3638 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3639 nd_desc = &acpi_desc->nd_desc; 3640 nd_desc->provider_name = "ACPI.NFIT"; 3641 nd_desc->module = THIS_MODULE; 3642 nd_desc->ndctl = acpi_nfit_ctl; 3643 nd_desc->flush_probe = acpi_nfit_flush_probe; 3644 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3645 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3646 3647 INIT_LIST_HEAD(&acpi_desc->spas); 3648 INIT_LIST_HEAD(&acpi_desc->dcrs); 3649 INIT_LIST_HEAD(&acpi_desc->bdws); 3650 INIT_LIST_HEAD(&acpi_desc->idts); 3651 INIT_LIST_HEAD(&acpi_desc->flushes); 3652 INIT_LIST_HEAD(&acpi_desc->memdevs); 3653 INIT_LIST_HEAD(&acpi_desc->dimms); 3654 INIT_LIST_HEAD(&acpi_desc->list); 3655 mutex_init(&acpi_desc->init_mutex); 3656 acpi_desc->scrub_tmo = 1; 3657 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3658 } 3659 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3660 3661 static void acpi_nfit_put_table(void *table) 3662 { 3663 acpi_put_table(table); 3664 } 3665 3666 void acpi_nfit_shutdown(void *data) 3667 { 3668 struct acpi_nfit_desc *acpi_desc = data; 3669 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3670 3671 /* 3672 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3673 * race teardown 3674 */ 3675 mutex_lock(&acpi_desc_lock); 3676 list_del(&acpi_desc->list); 3677 mutex_unlock(&acpi_desc_lock); 3678 3679 mutex_lock(&acpi_desc->init_mutex); 3680 set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); 3681 cancel_delayed_work_sync(&acpi_desc->dwork); 3682 mutex_unlock(&acpi_desc->init_mutex); 3683 3684 /* 3685 * Bounce the nvdimm bus lock to make sure any in-flight 3686 * acpi_nfit_ars_rescan() submissions have had a chance to 3687 * either submit or see ->cancel set. 3688 */ 3689 nfit_device_lock(bus_dev); 3690 nfit_device_unlock(bus_dev); 3691 3692 flush_workqueue(nfit_wq); 3693 } 3694 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3695 3696 static int acpi_nfit_add(struct acpi_device *adev) 3697 { 3698 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3699 struct acpi_nfit_desc *acpi_desc; 3700 struct device *dev = &adev->dev; 3701 struct acpi_table_header *tbl; 3702 acpi_status status = AE_OK; 3703 acpi_size sz; 3704 int rc = 0; 3705 3706 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3707 if (ACPI_FAILURE(status)) { 3708 /* The NVDIMM root device allows OS to trigger enumeration of 3709 * NVDIMMs through NFIT at boot time and re-enumeration at 3710 * root level via the _FIT method during runtime. 3711 * This is ok to return 0 here, we could have an nvdimm 3712 * hotplugged later and evaluate _FIT method which returns 3713 * data in the format of a series of NFIT Structures. 3714 */ 3715 dev_dbg(dev, "failed to find NFIT at startup\n"); 3716 return 0; 3717 } 3718 3719 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3720 if (rc) 3721 return rc; 3722 sz = tbl->length; 3723 3724 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3725 if (!acpi_desc) 3726 return -ENOMEM; 3727 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3728 3729 /* Save the acpi header for exporting the revision via sysfs */ 3730 acpi_desc->acpi_header = *tbl; 3731 3732 /* Evaluate _FIT and override with that if present */ 3733 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3734 if (ACPI_SUCCESS(status) && buf.length > 0) { 3735 union acpi_object *obj = buf.pointer; 3736 3737 if (obj->type == ACPI_TYPE_BUFFER) 3738 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3739 obj->buffer.length); 3740 else 3741 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3742 (int) obj->type); 3743 kfree(buf.pointer); 3744 } else 3745 /* skip over the lead-in header table */ 3746 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3747 + sizeof(struct acpi_table_nfit), 3748 sz - sizeof(struct acpi_table_nfit)); 3749 3750 if (rc) 3751 return rc; 3752 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3753 } 3754 3755 static int acpi_nfit_remove(struct acpi_device *adev) 3756 { 3757 /* see acpi_nfit_unregister */ 3758 return 0; 3759 } 3760 3761 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3762 { 3763 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3764 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3765 union acpi_object *obj; 3766 acpi_status status; 3767 int ret; 3768 3769 if (!dev->driver) { 3770 /* dev->driver may be null if we're being removed */ 3771 dev_dbg(dev, "no driver found for dev\n"); 3772 return; 3773 } 3774 3775 if (!acpi_desc) { 3776 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3777 if (!acpi_desc) 3778 return; 3779 acpi_nfit_desc_init(acpi_desc, dev); 3780 } else { 3781 /* 3782 * Finish previous registration before considering new 3783 * regions. 3784 */ 3785 flush_workqueue(nfit_wq); 3786 } 3787 3788 /* Evaluate _FIT */ 3789 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3790 if (ACPI_FAILURE(status)) { 3791 dev_err(dev, "failed to evaluate _FIT\n"); 3792 return; 3793 } 3794 3795 obj = buf.pointer; 3796 if (obj->type == ACPI_TYPE_BUFFER) { 3797 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3798 obj->buffer.length); 3799 if (ret) 3800 dev_err(dev, "failed to merge updated NFIT\n"); 3801 } else 3802 dev_err(dev, "Invalid _FIT\n"); 3803 kfree(buf.pointer); 3804 } 3805 3806 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3807 { 3808 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3809 3810 if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) 3811 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); 3812 else 3813 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT); 3814 } 3815 3816 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3817 { 3818 dev_dbg(dev, "event: 0x%x\n", event); 3819 3820 switch (event) { 3821 case NFIT_NOTIFY_UPDATE: 3822 return acpi_nfit_update_notify(dev, handle); 3823 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3824 return acpi_nfit_uc_error_notify(dev, handle); 3825 default: 3826 return; 3827 } 3828 } 3829 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3830 3831 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3832 { 3833 nfit_device_lock(&adev->dev); 3834 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3835 nfit_device_unlock(&adev->dev); 3836 } 3837 3838 static const struct acpi_device_id acpi_nfit_ids[] = { 3839 { "ACPI0012", 0 }, 3840 { "", 0 }, 3841 }; 3842 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3843 3844 static struct acpi_driver acpi_nfit_driver = { 3845 .name = KBUILD_MODNAME, 3846 .ids = acpi_nfit_ids, 3847 .ops = { 3848 .add = acpi_nfit_add, 3849 .remove = acpi_nfit_remove, 3850 .notify = acpi_nfit_notify, 3851 }, 3852 }; 3853 3854 static __init int nfit_init(void) 3855 { 3856 int ret; 3857 3858 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3859 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64); 3860 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3861 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3862 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3863 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3864 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3865 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3866 3867 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3868 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3869 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3870 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3871 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3872 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3873 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3874 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3875 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3876 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3877 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3878 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3879 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3880 guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); 3881 guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]); 3882 3883 nfit_wq = create_singlethread_workqueue("nfit"); 3884 if (!nfit_wq) 3885 return -ENOMEM; 3886 3887 nfit_mce_register(); 3888 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3889 if (ret) { 3890 nfit_mce_unregister(); 3891 destroy_workqueue(nfit_wq); 3892 } 3893 3894 return ret; 3895 3896 } 3897 3898 static __exit void nfit_exit(void) 3899 { 3900 nfit_mce_unregister(); 3901 acpi_bus_unregister_driver(&acpi_nfit_driver); 3902 destroy_workqueue(nfit_wq); 3903 WARN_ON(!list_empty(&acpi_descs)); 3904 } 3905 3906 module_init(nfit_init); 3907 module_exit(nfit_exit); 3908 MODULE_LICENSE("GPL v2"); 3909 MODULE_AUTHOR("Intel Corporation"); 3910