1 /* 2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/list_sort.h> 14 #include <linux/libnvdimm.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/ndctl.h> 18 #include <linux/sysfs.h> 19 #include <linux/delay.h> 20 #include <linux/list.h> 21 #include <linux/acpi.h> 22 #include <linux/sort.h> 23 #include <linux/io.h> 24 #include <linux/nd.h> 25 #include <asm/cacheflush.h> 26 #include <acpi/nfit.h> 27 #include "intel.h" 28 #include "nfit.h" 29 30 /* 31 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 32 * irrelevant. 33 */ 34 #include <linux/io-64-nonatomic-hi-lo.h> 35 36 static bool force_enable_dimms; 37 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 38 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 39 40 static bool disable_vendor_specific; 41 module_param(disable_vendor_specific, bool, S_IRUGO); 42 MODULE_PARM_DESC(disable_vendor_specific, 43 "Limit commands to the publicly specified set"); 44 45 static unsigned long override_dsm_mask; 46 module_param(override_dsm_mask, ulong, S_IRUGO); 47 MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions"); 48 49 static int default_dsm_family = -1; 50 module_param(default_dsm_family, int, S_IRUGO); 51 MODULE_PARM_DESC(default_dsm_family, 52 "Try this DSM type first when identifying NVDIMM family"); 53 54 static bool no_init_ars; 55 module_param(no_init_ars, bool, 0644); 56 MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time"); 57 58 LIST_HEAD(acpi_descs); 59 DEFINE_MUTEX(acpi_desc_lock); 60 61 static struct workqueue_struct *nfit_wq; 62 63 struct nfit_table_prev { 64 struct list_head spas; 65 struct list_head memdevs; 66 struct list_head dcrs; 67 struct list_head bdws; 68 struct list_head idts; 69 struct list_head flushes; 70 }; 71 72 static guid_t nfit_uuid[NFIT_UUID_MAX]; 73 74 const guid_t *to_nfit_uuid(enum nfit_uuids id) 75 { 76 return &nfit_uuid[id]; 77 } 78 EXPORT_SYMBOL(to_nfit_uuid); 79 80 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 81 { 82 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 83 84 /* 85 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct 86 * acpi_device. 87 */ 88 if (!nd_desc->provider_name 89 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) 90 return NULL; 91 92 return to_acpi_device(acpi_desc->dev); 93 } 94 95 static int xlat_bus_status(void *buf, unsigned int cmd, u32 status) 96 { 97 struct nd_cmd_clear_error *clear_err; 98 struct nd_cmd_ars_status *ars_status; 99 u16 flags; 100 101 switch (cmd) { 102 case ND_CMD_ARS_CAP: 103 if ((status & 0xffff) == NFIT_ARS_CAP_NONE) 104 return -ENOTTY; 105 106 /* Command failed */ 107 if (status & 0xffff) 108 return -EIO; 109 110 /* No supported scan types for this range */ 111 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; 112 if ((status >> 16 & flags) == 0) 113 return -ENOTTY; 114 return 0; 115 case ND_CMD_ARS_START: 116 /* ARS is in progress */ 117 if ((status & 0xffff) == NFIT_ARS_START_BUSY) 118 return -EBUSY; 119 120 /* Command failed */ 121 if (status & 0xffff) 122 return -EIO; 123 return 0; 124 case ND_CMD_ARS_STATUS: 125 ars_status = buf; 126 /* Command failed */ 127 if (status & 0xffff) 128 return -EIO; 129 /* Check extended status (Upper two bytes) */ 130 if (status == NFIT_ARS_STATUS_DONE) 131 return 0; 132 133 /* ARS is in progress */ 134 if (status == NFIT_ARS_STATUS_BUSY) 135 return -EBUSY; 136 137 /* No ARS performed for the current boot */ 138 if (status == NFIT_ARS_STATUS_NONE) 139 return -EAGAIN; 140 141 /* 142 * ARS interrupted, either we overflowed or some other 143 * agent wants the scan to stop. If we didn't overflow 144 * then just continue with the returned results. 145 */ 146 if (status == NFIT_ARS_STATUS_INTR) { 147 if (ars_status->out_length >= 40 && (ars_status->flags 148 & NFIT_ARS_F_OVERFLOW)) 149 return -ENOSPC; 150 return 0; 151 } 152 153 /* Unknown status */ 154 if (status >> 16) 155 return -EIO; 156 return 0; 157 case ND_CMD_CLEAR_ERROR: 158 clear_err = buf; 159 if (status & 0xffff) 160 return -EIO; 161 if (!clear_err->cleared) 162 return -EIO; 163 if (clear_err->length > clear_err->cleared) 164 return clear_err->cleared; 165 return 0; 166 default: 167 break; 168 } 169 170 /* all other non-zero status results in an error */ 171 if (status) 172 return -EIO; 173 return 0; 174 } 175 176 #define ACPI_LABELS_LOCKED 3 177 178 static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, 179 u32 status) 180 { 181 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 182 183 switch (cmd) { 184 case ND_CMD_GET_CONFIG_SIZE: 185 /* 186 * In the _LSI, _LSR, _LSW case the locked status is 187 * communicated via the read/write commands 188 */ 189 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) 190 break; 191 192 if (status >> 16 & ND_CONFIG_LOCKED) 193 return -EACCES; 194 break; 195 case ND_CMD_GET_CONFIG_DATA: 196 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) 197 && status == ACPI_LABELS_LOCKED) 198 return -EACCES; 199 break; 200 case ND_CMD_SET_CONFIG_DATA: 201 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags) 202 && status == ACPI_LABELS_LOCKED) 203 return -EACCES; 204 break; 205 default: 206 break; 207 } 208 209 /* all other non-zero status results in an error */ 210 if (status) 211 return -EIO; 212 return 0; 213 } 214 215 static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, 216 u32 status) 217 { 218 if (!nvdimm) 219 return xlat_bus_status(buf, cmd, status); 220 return xlat_nvdimm_status(nvdimm, buf, cmd, status); 221 } 222 223 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */ 224 static union acpi_object *pkg_to_buf(union acpi_object *pkg) 225 { 226 int i; 227 void *dst; 228 size_t size = 0; 229 union acpi_object *buf = NULL; 230 231 if (pkg->type != ACPI_TYPE_PACKAGE) { 232 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 233 pkg->type); 234 goto err; 235 } 236 237 for (i = 0; i < pkg->package.count; i++) { 238 union acpi_object *obj = &pkg->package.elements[i]; 239 240 if (obj->type == ACPI_TYPE_INTEGER) 241 size += 4; 242 else if (obj->type == ACPI_TYPE_BUFFER) 243 size += obj->buffer.length; 244 else { 245 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 246 obj->type); 247 goto err; 248 } 249 } 250 251 buf = ACPI_ALLOCATE(sizeof(*buf) + size); 252 if (!buf) 253 goto err; 254 255 dst = buf + 1; 256 buf->type = ACPI_TYPE_BUFFER; 257 buf->buffer.length = size; 258 buf->buffer.pointer = dst; 259 for (i = 0; i < pkg->package.count; i++) { 260 union acpi_object *obj = &pkg->package.elements[i]; 261 262 if (obj->type == ACPI_TYPE_INTEGER) { 263 memcpy(dst, &obj->integer.value, 4); 264 dst += 4; 265 } else if (obj->type == ACPI_TYPE_BUFFER) { 266 memcpy(dst, obj->buffer.pointer, obj->buffer.length); 267 dst += obj->buffer.length; 268 } 269 } 270 err: 271 ACPI_FREE(pkg); 272 return buf; 273 } 274 275 static union acpi_object *int_to_buf(union acpi_object *integer) 276 { 277 union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4); 278 void *dst = NULL; 279 280 if (!buf) 281 goto err; 282 283 if (integer->type != ACPI_TYPE_INTEGER) { 284 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 285 integer->type); 286 goto err; 287 } 288 289 dst = buf + 1; 290 buf->type = ACPI_TYPE_BUFFER; 291 buf->buffer.length = 4; 292 buf->buffer.pointer = dst; 293 memcpy(dst, &integer->integer.value, 4); 294 err: 295 ACPI_FREE(integer); 296 return buf; 297 } 298 299 static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset, 300 u32 len, void *data) 301 { 302 acpi_status rc; 303 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 304 struct acpi_object_list input = { 305 .count = 3, 306 .pointer = (union acpi_object []) { 307 [0] = { 308 .integer.type = ACPI_TYPE_INTEGER, 309 .integer.value = offset, 310 }, 311 [1] = { 312 .integer.type = ACPI_TYPE_INTEGER, 313 .integer.value = len, 314 }, 315 [2] = { 316 .buffer.type = ACPI_TYPE_BUFFER, 317 .buffer.pointer = data, 318 .buffer.length = len, 319 }, 320 }, 321 }; 322 323 rc = acpi_evaluate_object(handle, "_LSW", &input, &buf); 324 if (ACPI_FAILURE(rc)) 325 return NULL; 326 return int_to_buf(buf.pointer); 327 } 328 329 static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset, 330 u32 len) 331 { 332 acpi_status rc; 333 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 334 struct acpi_object_list input = { 335 .count = 2, 336 .pointer = (union acpi_object []) { 337 [0] = { 338 .integer.type = ACPI_TYPE_INTEGER, 339 .integer.value = offset, 340 }, 341 [1] = { 342 .integer.type = ACPI_TYPE_INTEGER, 343 .integer.value = len, 344 }, 345 }, 346 }; 347 348 rc = acpi_evaluate_object(handle, "_LSR", &input, &buf); 349 if (ACPI_FAILURE(rc)) 350 return NULL; 351 return pkg_to_buf(buf.pointer); 352 } 353 354 static union acpi_object *acpi_label_info(acpi_handle handle) 355 { 356 acpi_status rc; 357 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 358 359 rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf); 360 if (ACPI_FAILURE(rc)) 361 return NULL; 362 return pkg_to_buf(buf.pointer); 363 } 364 365 static u8 nfit_dsm_revid(unsigned family, unsigned func) 366 { 367 static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { 368 [NVDIMM_FAMILY_INTEL] = { 369 [NVDIMM_INTEL_GET_MODES] = 2, 370 [NVDIMM_INTEL_GET_FWINFO] = 2, 371 [NVDIMM_INTEL_START_FWUPDATE] = 2, 372 [NVDIMM_INTEL_SEND_FWUPDATE] = 2, 373 [NVDIMM_INTEL_FINISH_FWUPDATE] = 2, 374 [NVDIMM_INTEL_QUERY_FWUPDATE] = 2, 375 [NVDIMM_INTEL_SET_THRESHOLD] = 2, 376 [NVDIMM_INTEL_INJECT_ERROR] = 2, 377 [NVDIMM_INTEL_GET_SECURITY_STATE] = 2, 378 [NVDIMM_INTEL_SET_PASSPHRASE] = 2, 379 [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2, 380 [NVDIMM_INTEL_UNLOCK_UNIT] = 2, 381 [NVDIMM_INTEL_FREEZE_LOCK] = 2, 382 [NVDIMM_INTEL_SECURE_ERASE] = 2, 383 [NVDIMM_INTEL_OVERWRITE] = 2, 384 [NVDIMM_INTEL_QUERY_OVERWRITE] = 2, 385 [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2, 386 [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2, 387 }, 388 }; 389 u8 id; 390 391 if (family > NVDIMM_FAMILY_MAX) 392 return 0; 393 if (func > 31) 394 return 0; 395 id = revid_table[family][func]; 396 if (id == 0) 397 return 1; /* default */ 398 return id; 399 } 400 401 static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) 402 { 403 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 404 405 if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL 406 && func >= NVDIMM_INTEL_GET_SECURITY_STATE 407 && func <= NVDIMM_INTEL_MASTER_SECURE_ERASE) 408 return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG); 409 return true; 410 } 411 412 static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, 413 struct nd_cmd_pkg *call_pkg) 414 { 415 if (call_pkg) { 416 int i; 417 418 if (nfit_mem->family != call_pkg->nd_family) 419 return -ENOTTY; 420 421 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) 422 if (call_pkg->nd_reserved2[i]) 423 return -EINVAL; 424 return call_pkg->nd_command; 425 } 426 427 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ 428 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 429 return cmd; 430 431 /* 432 * Force function number validation to fail since 0 is never 433 * published as a valid function in dsm_mask. 434 */ 435 return 0; 436 } 437 438 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 439 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 440 { 441 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 442 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 443 union acpi_object in_obj, in_buf, *out_obj; 444 const struct nd_cmd_desc *desc = NULL; 445 struct device *dev = acpi_desc->dev; 446 struct nd_cmd_pkg *call_pkg = NULL; 447 const char *cmd_name, *dimm_name; 448 unsigned long cmd_mask, dsm_mask; 449 u32 offset, fw_status = 0; 450 acpi_handle handle; 451 const guid_t *guid; 452 int func, rc, i; 453 454 if (cmd_rc) 455 *cmd_rc = -EINVAL; 456 457 if (nvdimm) { 458 struct acpi_device *adev = nfit_mem->adev; 459 460 if (!adev) 461 return -ENOTTY; 462 463 if (cmd == ND_CMD_CALL) 464 call_pkg = buf; 465 func = cmd_to_func(nfit_mem, cmd, call_pkg); 466 if (func < 0) 467 return func; 468 dimm_name = nvdimm_name(nvdimm); 469 cmd_name = nvdimm_cmd_name(cmd); 470 cmd_mask = nvdimm_cmd_mask(nvdimm); 471 dsm_mask = nfit_mem->dsm_mask; 472 desc = nd_cmd_dimm_desc(cmd); 473 guid = to_nfit_uuid(nfit_mem->family); 474 handle = adev->handle; 475 } else { 476 struct acpi_device *adev = to_acpi_dev(acpi_desc); 477 478 func = cmd; 479 cmd_name = nvdimm_bus_cmd_name(cmd); 480 cmd_mask = nd_desc->cmd_mask; 481 dsm_mask = cmd_mask; 482 if (cmd == ND_CMD_CALL) 483 dsm_mask = nd_desc->bus_dsm_mask; 484 desc = nd_cmd_bus_desc(cmd); 485 guid = to_nfit_uuid(NFIT_DEV_BUS); 486 handle = adev->handle; 487 dimm_name = "bus"; 488 } 489 490 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 491 return -ENOTTY; 492 493 /* 494 * Check for a valid command. For ND_CMD_CALL, we also have to 495 * make sure that the DSM function is supported. 496 */ 497 if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) 498 return -ENOTTY; 499 else if (!test_bit(cmd, &cmd_mask)) 500 return -ENOTTY; 501 502 in_obj.type = ACPI_TYPE_PACKAGE; 503 in_obj.package.count = 1; 504 in_obj.package.elements = &in_buf; 505 in_buf.type = ACPI_TYPE_BUFFER; 506 in_buf.buffer.pointer = buf; 507 in_buf.buffer.length = 0; 508 509 /* libnvdimm has already validated the input envelope */ 510 for (i = 0; i < desc->in_num; i++) 511 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, 512 i, buf); 513 514 if (call_pkg) { 515 /* skip over package wrapper */ 516 in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; 517 in_buf.buffer.length = call_pkg->nd_size_in; 518 } 519 520 dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n", 521 dimm_name, cmd, func, in_buf.buffer.length); 522 if (payload_dumpable(nvdimm, func)) 523 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, 524 in_buf.buffer.pointer, 525 min_t(u32, 256, in_buf.buffer.length), true); 526 527 /* call the BIOS, prefer the named methods over _DSM if available */ 528 if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE 529 && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) 530 out_obj = acpi_label_info(handle); 531 else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA 532 && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { 533 struct nd_cmd_get_config_data_hdr *p = buf; 534 535 out_obj = acpi_label_read(handle, p->in_offset, p->in_length); 536 } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA 537 && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) { 538 struct nd_cmd_set_config_hdr *p = buf; 539 540 out_obj = acpi_label_write(handle, p->in_offset, p->in_length, 541 p->in_buf); 542 } else { 543 u8 revid; 544 545 if (nvdimm) 546 revid = nfit_dsm_revid(nfit_mem->family, func); 547 else 548 revid = 1; 549 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); 550 } 551 552 if (!out_obj) { 553 dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name); 554 return -EINVAL; 555 } 556 557 if (call_pkg) { 558 call_pkg->nd_fw_size = out_obj->buffer.length; 559 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, 560 out_obj->buffer.pointer, 561 min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); 562 563 ACPI_FREE(out_obj); 564 /* 565 * Need to support FW function w/o known size in advance. 566 * Caller can determine required size based upon nd_fw_size. 567 * If we return an error (like elsewhere) then caller wouldn't 568 * be able to rely upon data returned to make calculation. 569 */ 570 if (cmd_rc) 571 *cmd_rc = 0; 572 return 0; 573 } 574 575 if (out_obj->package.type != ACPI_TYPE_BUFFER) { 576 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", 577 dimm_name, cmd_name, out_obj->type); 578 rc = -EINVAL; 579 goto out; 580 } 581 582 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, 583 cmd_name, out_obj->buffer.length); 584 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, 585 out_obj->buffer.pointer, 586 min_t(u32, 128, out_obj->buffer.length), true); 587 588 for (i = 0, offset = 0; i < desc->out_num; i++) { 589 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, 590 (u32 *) out_obj->buffer.pointer, 591 out_obj->buffer.length - offset); 592 593 if (offset + out_size > out_obj->buffer.length) { 594 dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n", 595 dimm_name, cmd_name, i); 596 break; 597 } 598 599 if (in_buf.buffer.length + offset + out_size > buf_len) { 600 dev_dbg(dev, "%s output overrun cmd: %s field: %d\n", 601 dimm_name, cmd_name, i); 602 rc = -ENXIO; 603 goto out; 604 } 605 memcpy(buf + in_buf.buffer.length + offset, 606 out_obj->buffer.pointer + offset, out_size); 607 offset += out_size; 608 } 609 610 /* 611 * Set fw_status for all the commands with a known format to be 612 * later interpreted by xlat_status(). 613 */ 614 if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP 615 && cmd <= ND_CMD_CLEAR_ERROR) 616 || (nvdimm && cmd >= ND_CMD_SMART 617 && cmd <= ND_CMD_VENDOR))) 618 fw_status = *(u32 *) out_obj->buffer.pointer; 619 620 if (offset + in_buf.buffer.length < buf_len) { 621 if (i >= 1) { 622 /* 623 * status valid, return the number of bytes left 624 * unfilled in the output buffer 625 */ 626 rc = buf_len - offset - in_buf.buffer.length; 627 if (cmd_rc) 628 *cmd_rc = xlat_status(nvdimm, buf, cmd, 629 fw_status); 630 } else { 631 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 632 __func__, dimm_name, cmd_name, buf_len, 633 offset); 634 rc = -ENXIO; 635 } 636 } else { 637 rc = 0; 638 if (cmd_rc) 639 *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status); 640 } 641 642 out: 643 ACPI_FREE(out_obj); 644 645 return rc; 646 } 647 EXPORT_SYMBOL_GPL(acpi_nfit_ctl); 648 649 static const char *spa_type_name(u16 type) 650 { 651 static const char *to_name[] = { 652 [NFIT_SPA_VOLATILE] = "volatile", 653 [NFIT_SPA_PM] = "pmem", 654 [NFIT_SPA_DCR] = "dimm-control-region", 655 [NFIT_SPA_BDW] = "block-data-window", 656 [NFIT_SPA_VDISK] = "volatile-disk", 657 [NFIT_SPA_VCD] = "volatile-cd", 658 [NFIT_SPA_PDISK] = "persistent-disk", 659 [NFIT_SPA_PCD] = "persistent-cd", 660 661 }; 662 663 if (type > NFIT_SPA_PCD) 664 return "unknown"; 665 666 return to_name[type]; 667 } 668 669 int nfit_spa_type(struct acpi_nfit_system_address *spa) 670 { 671 int i; 672 673 for (i = 0; i < NFIT_UUID_MAX; i++) 674 if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid)) 675 return i; 676 return -1; 677 } 678 679 static bool add_spa(struct acpi_nfit_desc *acpi_desc, 680 struct nfit_table_prev *prev, 681 struct acpi_nfit_system_address *spa) 682 { 683 struct device *dev = acpi_desc->dev; 684 struct nfit_spa *nfit_spa; 685 686 if (spa->header.length != sizeof(*spa)) 687 return false; 688 689 list_for_each_entry(nfit_spa, &prev->spas, list) { 690 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { 691 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 692 return true; 693 } 694 } 695 696 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), 697 GFP_KERNEL); 698 if (!nfit_spa) 699 return false; 700 INIT_LIST_HEAD(&nfit_spa->list); 701 memcpy(nfit_spa->spa, spa, sizeof(*spa)); 702 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 703 dev_dbg(dev, "spa index: %d type: %s\n", 704 spa->range_index, 705 spa_type_name(nfit_spa_type(spa))); 706 return true; 707 } 708 709 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, 710 struct nfit_table_prev *prev, 711 struct acpi_nfit_memory_map *memdev) 712 { 713 struct device *dev = acpi_desc->dev; 714 struct nfit_memdev *nfit_memdev; 715 716 if (memdev->header.length != sizeof(*memdev)) 717 return false; 718 719 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 720 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 721 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 722 return true; 723 } 724 725 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), 726 GFP_KERNEL); 727 if (!nfit_memdev) 728 return false; 729 INIT_LIST_HEAD(&nfit_memdev->list); 730 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); 731 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 732 dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n", 733 memdev->device_handle, memdev->range_index, 734 memdev->region_index, memdev->flags); 735 return true; 736 } 737 738 int nfit_get_smbios_id(u32 device_handle, u16 *flags) 739 { 740 struct acpi_nfit_memory_map *memdev; 741 struct acpi_nfit_desc *acpi_desc; 742 struct nfit_mem *nfit_mem; 743 u16 physical_id; 744 745 mutex_lock(&acpi_desc_lock); 746 list_for_each_entry(acpi_desc, &acpi_descs, list) { 747 mutex_lock(&acpi_desc->init_mutex); 748 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 749 memdev = __to_nfit_memdev(nfit_mem); 750 if (memdev->device_handle == device_handle) { 751 *flags = memdev->flags; 752 physical_id = memdev->physical_id; 753 mutex_unlock(&acpi_desc->init_mutex); 754 mutex_unlock(&acpi_desc_lock); 755 return physical_id; 756 } 757 } 758 mutex_unlock(&acpi_desc->init_mutex); 759 } 760 mutex_unlock(&acpi_desc_lock); 761 762 return -ENODEV; 763 } 764 EXPORT_SYMBOL_GPL(nfit_get_smbios_id); 765 766 /* 767 * An implementation may provide a truncated control region if no block windows 768 * are defined. 769 */ 770 static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) 771 { 772 if (dcr->header.length < offsetof(struct acpi_nfit_control_region, 773 window_size)) 774 return 0; 775 if (dcr->windows) 776 return sizeof(*dcr); 777 return offsetof(struct acpi_nfit_control_region, window_size); 778 } 779 780 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 781 struct nfit_table_prev *prev, 782 struct acpi_nfit_control_region *dcr) 783 { 784 struct device *dev = acpi_desc->dev; 785 struct nfit_dcr *nfit_dcr; 786 787 if (!sizeof_dcr(dcr)) 788 return false; 789 790 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 791 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { 792 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 793 return true; 794 } 795 796 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), 797 GFP_KERNEL); 798 if (!nfit_dcr) 799 return false; 800 INIT_LIST_HEAD(&nfit_dcr->list); 801 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); 802 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 803 dev_dbg(dev, "dcr index: %d windows: %d\n", 804 dcr->region_index, dcr->windows); 805 return true; 806 } 807 808 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, 809 struct nfit_table_prev *prev, 810 struct acpi_nfit_data_region *bdw) 811 { 812 struct device *dev = acpi_desc->dev; 813 struct nfit_bdw *nfit_bdw; 814 815 if (bdw->header.length != sizeof(*bdw)) 816 return false; 817 list_for_each_entry(nfit_bdw, &prev->bdws, list) 818 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 819 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 820 return true; 821 } 822 823 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), 824 GFP_KERNEL); 825 if (!nfit_bdw) 826 return false; 827 INIT_LIST_HEAD(&nfit_bdw->list); 828 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); 829 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 830 dev_dbg(dev, "bdw dcr: %d windows: %d\n", 831 bdw->region_index, bdw->windows); 832 return true; 833 } 834 835 static size_t sizeof_idt(struct acpi_nfit_interleave *idt) 836 { 837 if (idt->header.length < sizeof(*idt)) 838 return 0; 839 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); 840 } 841 842 static bool add_idt(struct acpi_nfit_desc *acpi_desc, 843 struct nfit_table_prev *prev, 844 struct acpi_nfit_interleave *idt) 845 { 846 struct device *dev = acpi_desc->dev; 847 struct nfit_idt *nfit_idt; 848 849 if (!sizeof_idt(idt)) 850 return false; 851 852 list_for_each_entry(nfit_idt, &prev->idts, list) { 853 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) 854 continue; 855 856 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { 857 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 858 return true; 859 } 860 } 861 862 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), 863 GFP_KERNEL); 864 if (!nfit_idt) 865 return false; 866 INIT_LIST_HEAD(&nfit_idt->list); 867 memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); 868 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 869 dev_dbg(dev, "idt index: %d num_lines: %d\n", 870 idt->interleave_index, idt->line_count); 871 return true; 872 } 873 874 static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) 875 { 876 if (flush->header.length < sizeof(*flush)) 877 return 0; 878 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); 879 } 880 881 static bool add_flush(struct acpi_nfit_desc *acpi_desc, 882 struct nfit_table_prev *prev, 883 struct acpi_nfit_flush_address *flush) 884 { 885 struct device *dev = acpi_desc->dev; 886 struct nfit_flush *nfit_flush; 887 888 if (!sizeof_flush(flush)) 889 return false; 890 891 list_for_each_entry(nfit_flush, &prev->flushes, list) { 892 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) 893 continue; 894 895 if (memcmp(nfit_flush->flush, flush, 896 sizeof_flush(flush)) == 0) { 897 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 898 return true; 899 } 900 } 901 902 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) 903 + sizeof_flush(flush), GFP_KERNEL); 904 if (!nfit_flush) 905 return false; 906 INIT_LIST_HEAD(&nfit_flush->list); 907 memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); 908 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 909 dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n", 910 flush->device_handle, flush->hint_count); 911 return true; 912 } 913 914 static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, 915 struct acpi_nfit_capabilities *pcap) 916 { 917 struct device *dev = acpi_desc->dev; 918 u32 mask; 919 920 mask = (1 << (pcap->highest_capability + 1)) - 1; 921 acpi_desc->platform_cap = pcap->capabilities & mask; 922 dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap); 923 return true; 924 } 925 926 static void *add_table(struct acpi_nfit_desc *acpi_desc, 927 struct nfit_table_prev *prev, void *table, const void *end) 928 { 929 struct device *dev = acpi_desc->dev; 930 struct acpi_nfit_header *hdr; 931 void *err = ERR_PTR(-ENOMEM); 932 933 if (table >= end) 934 return NULL; 935 936 hdr = table; 937 if (!hdr->length) { 938 dev_warn(dev, "found a zero length table '%d' parsing nfit\n", 939 hdr->type); 940 return NULL; 941 } 942 943 switch (hdr->type) { 944 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: 945 if (!add_spa(acpi_desc, prev, table)) 946 return err; 947 break; 948 case ACPI_NFIT_TYPE_MEMORY_MAP: 949 if (!add_memdev(acpi_desc, prev, table)) 950 return err; 951 break; 952 case ACPI_NFIT_TYPE_CONTROL_REGION: 953 if (!add_dcr(acpi_desc, prev, table)) 954 return err; 955 break; 956 case ACPI_NFIT_TYPE_DATA_REGION: 957 if (!add_bdw(acpi_desc, prev, table)) 958 return err; 959 break; 960 case ACPI_NFIT_TYPE_INTERLEAVE: 961 if (!add_idt(acpi_desc, prev, table)) 962 return err; 963 break; 964 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 965 if (!add_flush(acpi_desc, prev, table)) 966 return err; 967 break; 968 case ACPI_NFIT_TYPE_SMBIOS: 969 dev_dbg(dev, "smbios\n"); 970 break; 971 case ACPI_NFIT_TYPE_CAPABILITIES: 972 if (!add_platform_cap(acpi_desc, table)) 973 return err; 974 break; 975 default: 976 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); 977 break; 978 } 979 980 return table + hdr->length; 981 } 982 983 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, 984 struct nfit_mem *nfit_mem) 985 { 986 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 987 u16 dcr = nfit_mem->dcr->region_index; 988 struct nfit_spa *nfit_spa; 989 990 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 991 u16 range_index = nfit_spa->spa->range_index; 992 int type = nfit_spa_type(nfit_spa->spa); 993 struct nfit_memdev *nfit_memdev; 994 995 if (type != NFIT_SPA_BDW) 996 continue; 997 998 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 999 if (nfit_memdev->memdev->range_index != range_index) 1000 continue; 1001 if (nfit_memdev->memdev->device_handle != device_handle) 1002 continue; 1003 if (nfit_memdev->memdev->region_index != dcr) 1004 continue; 1005 1006 nfit_mem->spa_bdw = nfit_spa->spa; 1007 return; 1008 } 1009 } 1010 1011 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 1012 nfit_mem->spa_dcr->range_index); 1013 nfit_mem->bdw = NULL; 1014 } 1015 1016 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, 1017 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 1018 { 1019 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 1020 struct nfit_memdev *nfit_memdev; 1021 struct nfit_bdw *nfit_bdw; 1022 struct nfit_idt *nfit_idt; 1023 u16 idt_idx, range_index; 1024 1025 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 1026 if (nfit_bdw->bdw->region_index != dcr) 1027 continue; 1028 nfit_mem->bdw = nfit_bdw->bdw; 1029 break; 1030 } 1031 1032 if (!nfit_mem->bdw) 1033 return; 1034 1035 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 1036 1037 if (!nfit_mem->spa_bdw) 1038 return; 1039 1040 range_index = nfit_mem->spa_bdw->range_index; 1041 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1042 if (nfit_memdev->memdev->range_index != range_index || 1043 nfit_memdev->memdev->region_index != dcr) 1044 continue; 1045 nfit_mem->memdev_bdw = nfit_memdev->memdev; 1046 idt_idx = nfit_memdev->memdev->interleave_index; 1047 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 1048 if (nfit_idt->idt->interleave_index != idt_idx) 1049 continue; 1050 nfit_mem->idt_bdw = nfit_idt->idt; 1051 break; 1052 } 1053 break; 1054 } 1055 } 1056 1057 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, 1058 struct acpi_nfit_system_address *spa) 1059 { 1060 struct nfit_mem *nfit_mem, *found; 1061 struct nfit_memdev *nfit_memdev; 1062 int type = spa ? nfit_spa_type(spa) : 0; 1063 1064 switch (type) { 1065 case NFIT_SPA_DCR: 1066 case NFIT_SPA_PM: 1067 break; 1068 default: 1069 if (spa) 1070 return 0; 1071 } 1072 1073 /* 1074 * This loop runs in two modes, when a dimm is mapped the loop 1075 * adds memdev associations to an existing dimm, or creates a 1076 * dimm. In the unmapped dimm case this loop sweeps for memdev 1077 * instances with an invalid / zero range_index and adds those 1078 * dimms without spa associations. 1079 */ 1080 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1081 struct nfit_flush *nfit_flush; 1082 struct nfit_dcr *nfit_dcr; 1083 u32 device_handle; 1084 u16 dcr; 1085 1086 if (spa && nfit_memdev->memdev->range_index != spa->range_index) 1087 continue; 1088 if (!spa && nfit_memdev->memdev->range_index) 1089 continue; 1090 found = NULL; 1091 dcr = nfit_memdev->memdev->region_index; 1092 device_handle = nfit_memdev->memdev->device_handle; 1093 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1094 if (__to_nfit_memdev(nfit_mem)->device_handle 1095 == device_handle) { 1096 found = nfit_mem; 1097 break; 1098 } 1099 1100 if (found) 1101 nfit_mem = found; 1102 else { 1103 nfit_mem = devm_kzalloc(acpi_desc->dev, 1104 sizeof(*nfit_mem), GFP_KERNEL); 1105 if (!nfit_mem) 1106 return -ENOMEM; 1107 INIT_LIST_HEAD(&nfit_mem->list); 1108 nfit_mem->acpi_desc = acpi_desc; 1109 list_add(&nfit_mem->list, &acpi_desc->dimms); 1110 } 1111 1112 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1113 if (nfit_dcr->dcr->region_index != dcr) 1114 continue; 1115 /* 1116 * Record the control region for the dimm. For 1117 * the ACPI 6.1 case, where there are separate 1118 * control regions for the pmem vs blk 1119 * interfaces, be sure to record the extended 1120 * blk details. 1121 */ 1122 if (!nfit_mem->dcr) 1123 nfit_mem->dcr = nfit_dcr->dcr; 1124 else if (nfit_mem->dcr->windows == 0 1125 && nfit_dcr->dcr->windows) 1126 nfit_mem->dcr = nfit_dcr->dcr; 1127 break; 1128 } 1129 1130 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 1131 struct acpi_nfit_flush_address *flush; 1132 u16 i; 1133 1134 if (nfit_flush->flush->device_handle != device_handle) 1135 continue; 1136 nfit_mem->nfit_flush = nfit_flush; 1137 flush = nfit_flush->flush; 1138 nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, 1139 flush->hint_count, 1140 sizeof(struct resource), 1141 GFP_KERNEL); 1142 if (!nfit_mem->flush_wpq) 1143 return -ENOMEM; 1144 for (i = 0; i < flush->hint_count; i++) { 1145 struct resource *res = &nfit_mem->flush_wpq[i]; 1146 1147 res->start = flush->hint_address[i]; 1148 res->end = res->start + 8 - 1; 1149 } 1150 break; 1151 } 1152 1153 if (dcr && !nfit_mem->dcr) { 1154 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", 1155 spa->range_index, dcr); 1156 return -ENODEV; 1157 } 1158 1159 if (type == NFIT_SPA_DCR) { 1160 struct nfit_idt *nfit_idt; 1161 u16 idt_idx; 1162 1163 /* multiple dimms may share a SPA when interleaved */ 1164 nfit_mem->spa_dcr = spa; 1165 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1166 idt_idx = nfit_memdev->memdev->interleave_index; 1167 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 1168 if (nfit_idt->idt->interleave_index != idt_idx) 1169 continue; 1170 nfit_mem->idt_dcr = nfit_idt->idt; 1171 break; 1172 } 1173 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); 1174 } else if (type == NFIT_SPA_PM) { 1175 /* 1176 * A single dimm may belong to multiple SPA-PM 1177 * ranges, record at least one in addition to 1178 * any SPA-DCR range. 1179 */ 1180 nfit_mem->memdev_pmem = nfit_memdev->memdev; 1181 } else 1182 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1183 } 1184 1185 return 0; 1186 } 1187 1188 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 1189 { 1190 struct nfit_mem *a = container_of(_a, typeof(*a), list); 1191 struct nfit_mem *b = container_of(_b, typeof(*b), list); 1192 u32 handleA, handleB; 1193 1194 handleA = __to_nfit_memdev(a)->device_handle; 1195 handleB = __to_nfit_memdev(b)->device_handle; 1196 if (handleA < handleB) 1197 return -1; 1198 else if (handleA > handleB) 1199 return 1; 1200 return 0; 1201 } 1202 1203 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 1204 { 1205 struct nfit_spa *nfit_spa; 1206 int rc; 1207 1208 1209 /* 1210 * For each SPA-DCR or SPA-PMEM address range find its 1211 * corresponding MEMDEV(s). From each MEMDEV find the 1212 * corresponding DCR. Then, if we're operating on a SPA-DCR, 1213 * try to find a SPA-BDW and a corresponding BDW that references 1214 * the DCR. Throw it all into an nfit_mem object. Note, that 1215 * BDWs are optional. 1216 */ 1217 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1218 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); 1219 if (rc) 1220 return rc; 1221 } 1222 1223 /* 1224 * If a DIMM has failed to be mapped into SPA there will be no 1225 * SPA entries above. Find and register all the unmapped DIMMs 1226 * for reporting and recovery purposes. 1227 */ 1228 rc = __nfit_mem_init(acpi_desc, NULL); 1229 if (rc) 1230 return rc; 1231 1232 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 1233 1234 return 0; 1235 } 1236 1237 static ssize_t bus_dsm_mask_show(struct device *dev, 1238 struct device_attribute *attr, char *buf) 1239 { 1240 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1241 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1242 1243 return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); 1244 } 1245 static struct device_attribute dev_attr_bus_dsm_mask = 1246 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); 1247 1248 static ssize_t revision_show(struct device *dev, 1249 struct device_attribute *attr, char *buf) 1250 { 1251 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1252 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1253 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1254 1255 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); 1256 } 1257 static DEVICE_ATTR_RO(revision); 1258 1259 static ssize_t hw_error_scrub_show(struct device *dev, 1260 struct device_attribute *attr, char *buf) 1261 { 1262 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1263 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1264 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1265 1266 return sprintf(buf, "%d\n", acpi_desc->scrub_mode); 1267 } 1268 1269 /* 1270 * The 'hw_error_scrub' attribute can have the following values written to it: 1271 * '0': Switch to the default mode where an exception will only insert 1272 * the address of the memory error into the poison and badblocks lists. 1273 * '1': Enable a full scrub to happen if an exception for a memory error is 1274 * received. 1275 */ 1276 static ssize_t hw_error_scrub_store(struct device *dev, 1277 struct device_attribute *attr, const char *buf, size_t size) 1278 { 1279 struct nvdimm_bus_descriptor *nd_desc; 1280 ssize_t rc; 1281 long val; 1282 1283 rc = kstrtol(buf, 0, &val); 1284 if (rc) 1285 return rc; 1286 1287 device_lock(dev); 1288 nd_desc = dev_get_drvdata(dev); 1289 if (nd_desc) { 1290 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1291 1292 switch (val) { 1293 case HW_ERROR_SCRUB_ON: 1294 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; 1295 break; 1296 case HW_ERROR_SCRUB_OFF: 1297 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; 1298 break; 1299 default: 1300 rc = -EINVAL; 1301 break; 1302 } 1303 } 1304 device_unlock(dev); 1305 if (rc) 1306 return rc; 1307 return size; 1308 } 1309 static DEVICE_ATTR_RW(hw_error_scrub); 1310 1311 /* 1312 * This shows the number of full Address Range Scrubs that have been 1313 * completed since driver load time. Userspace can wait on this using 1314 * select/poll etc. A '+' at the end indicates an ARS is in progress 1315 */ 1316 static ssize_t scrub_show(struct device *dev, 1317 struct device_attribute *attr, char *buf) 1318 { 1319 struct nvdimm_bus_descriptor *nd_desc; 1320 ssize_t rc = -ENXIO; 1321 1322 device_lock(dev); 1323 nd_desc = dev_get_drvdata(dev); 1324 if (nd_desc) { 1325 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1326 1327 mutex_lock(&acpi_desc->init_mutex); 1328 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, 1329 acpi_desc->scrub_busy 1330 && !acpi_desc->cancel ? "+\n" : "\n"); 1331 mutex_unlock(&acpi_desc->init_mutex); 1332 } 1333 device_unlock(dev); 1334 return rc; 1335 } 1336 1337 static ssize_t scrub_store(struct device *dev, 1338 struct device_attribute *attr, const char *buf, size_t size) 1339 { 1340 struct nvdimm_bus_descriptor *nd_desc; 1341 ssize_t rc; 1342 long val; 1343 1344 rc = kstrtol(buf, 0, &val); 1345 if (rc) 1346 return rc; 1347 if (val != 1) 1348 return -EINVAL; 1349 1350 device_lock(dev); 1351 nd_desc = dev_get_drvdata(dev); 1352 if (nd_desc) { 1353 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1354 1355 rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); 1356 } 1357 device_unlock(dev); 1358 if (rc) 1359 return rc; 1360 return size; 1361 } 1362 static DEVICE_ATTR_RW(scrub); 1363 1364 static bool ars_supported(struct nvdimm_bus *nvdimm_bus) 1365 { 1366 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1367 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START 1368 | 1 << ND_CMD_ARS_STATUS; 1369 1370 return (nd_desc->cmd_mask & mask) == mask; 1371 } 1372 1373 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) 1374 { 1375 struct device *dev = container_of(kobj, struct device, kobj); 1376 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1377 1378 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) 1379 return 0; 1380 return a->mode; 1381 } 1382 1383 static struct attribute *acpi_nfit_attributes[] = { 1384 &dev_attr_revision.attr, 1385 &dev_attr_scrub.attr, 1386 &dev_attr_hw_error_scrub.attr, 1387 &dev_attr_bus_dsm_mask.attr, 1388 NULL, 1389 }; 1390 1391 static const struct attribute_group acpi_nfit_attribute_group = { 1392 .name = "nfit", 1393 .attrs = acpi_nfit_attributes, 1394 .is_visible = nfit_visible, 1395 }; 1396 1397 static const struct attribute_group *acpi_nfit_attribute_groups[] = { 1398 &nvdimm_bus_attribute_group, 1399 &acpi_nfit_attribute_group, 1400 NULL, 1401 }; 1402 1403 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 1404 { 1405 struct nvdimm *nvdimm = to_nvdimm(dev); 1406 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1407 1408 return __to_nfit_memdev(nfit_mem); 1409 } 1410 1411 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 1412 { 1413 struct nvdimm *nvdimm = to_nvdimm(dev); 1414 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1415 1416 return nfit_mem->dcr; 1417 } 1418 1419 static ssize_t handle_show(struct device *dev, 1420 struct device_attribute *attr, char *buf) 1421 { 1422 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1423 1424 return sprintf(buf, "%#x\n", memdev->device_handle); 1425 } 1426 static DEVICE_ATTR_RO(handle); 1427 1428 static ssize_t phys_id_show(struct device *dev, 1429 struct device_attribute *attr, char *buf) 1430 { 1431 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1432 1433 return sprintf(buf, "%#x\n", memdev->physical_id); 1434 } 1435 static DEVICE_ATTR_RO(phys_id); 1436 1437 static ssize_t vendor_show(struct device *dev, 1438 struct device_attribute *attr, char *buf) 1439 { 1440 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1441 1442 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1443 } 1444 static DEVICE_ATTR_RO(vendor); 1445 1446 static ssize_t rev_id_show(struct device *dev, 1447 struct device_attribute *attr, char *buf) 1448 { 1449 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1450 1451 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1452 } 1453 static DEVICE_ATTR_RO(rev_id); 1454 1455 static ssize_t device_show(struct device *dev, 1456 struct device_attribute *attr, char *buf) 1457 { 1458 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1459 1460 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1461 } 1462 static DEVICE_ATTR_RO(device); 1463 1464 static ssize_t subsystem_vendor_show(struct device *dev, 1465 struct device_attribute *attr, char *buf) 1466 { 1467 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1468 1469 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1470 } 1471 static DEVICE_ATTR_RO(subsystem_vendor); 1472 1473 static ssize_t subsystem_rev_id_show(struct device *dev, 1474 struct device_attribute *attr, char *buf) 1475 { 1476 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1477 1478 return sprintf(buf, "0x%04x\n", 1479 be16_to_cpu(dcr->subsystem_revision_id)); 1480 } 1481 static DEVICE_ATTR_RO(subsystem_rev_id); 1482 1483 static ssize_t subsystem_device_show(struct device *dev, 1484 struct device_attribute *attr, char *buf) 1485 { 1486 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1487 1488 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1489 } 1490 static DEVICE_ATTR_RO(subsystem_device); 1491 1492 static int num_nvdimm_formats(struct nvdimm *nvdimm) 1493 { 1494 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1495 int formats = 0; 1496 1497 if (nfit_mem->memdev_pmem) 1498 formats++; 1499 if (nfit_mem->memdev_bdw) 1500 formats++; 1501 return formats; 1502 } 1503 1504 static ssize_t format_show(struct device *dev, 1505 struct device_attribute *attr, char *buf) 1506 { 1507 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1508 1509 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1510 } 1511 static DEVICE_ATTR_RO(format); 1512 1513 static ssize_t format1_show(struct device *dev, 1514 struct device_attribute *attr, char *buf) 1515 { 1516 u32 handle; 1517 ssize_t rc = -ENXIO; 1518 struct nfit_mem *nfit_mem; 1519 struct nfit_memdev *nfit_memdev; 1520 struct acpi_nfit_desc *acpi_desc; 1521 struct nvdimm *nvdimm = to_nvdimm(dev); 1522 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1523 1524 nfit_mem = nvdimm_provider_data(nvdimm); 1525 acpi_desc = nfit_mem->acpi_desc; 1526 handle = to_nfit_memdev(dev)->device_handle; 1527 1528 /* assumes DIMMs have at most 2 published interface codes */ 1529 mutex_lock(&acpi_desc->init_mutex); 1530 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1531 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1532 struct nfit_dcr *nfit_dcr; 1533 1534 if (memdev->device_handle != handle) 1535 continue; 1536 1537 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1538 if (nfit_dcr->dcr->region_index != memdev->region_index) 1539 continue; 1540 if (nfit_dcr->dcr->code == dcr->code) 1541 continue; 1542 rc = sprintf(buf, "0x%04x\n", 1543 le16_to_cpu(nfit_dcr->dcr->code)); 1544 break; 1545 } 1546 if (rc != ENXIO) 1547 break; 1548 } 1549 mutex_unlock(&acpi_desc->init_mutex); 1550 return rc; 1551 } 1552 static DEVICE_ATTR_RO(format1); 1553 1554 static ssize_t formats_show(struct device *dev, 1555 struct device_attribute *attr, char *buf) 1556 { 1557 struct nvdimm *nvdimm = to_nvdimm(dev); 1558 1559 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1560 } 1561 static DEVICE_ATTR_RO(formats); 1562 1563 static ssize_t serial_show(struct device *dev, 1564 struct device_attribute *attr, char *buf) 1565 { 1566 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1567 1568 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1569 } 1570 static DEVICE_ATTR_RO(serial); 1571 1572 static ssize_t family_show(struct device *dev, 1573 struct device_attribute *attr, char *buf) 1574 { 1575 struct nvdimm *nvdimm = to_nvdimm(dev); 1576 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1577 1578 if (nfit_mem->family < 0) 1579 return -ENXIO; 1580 return sprintf(buf, "%d\n", nfit_mem->family); 1581 } 1582 static DEVICE_ATTR_RO(family); 1583 1584 static ssize_t dsm_mask_show(struct device *dev, 1585 struct device_attribute *attr, char *buf) 1586 { 1587 struct nvdimm *nvdimm = to_nvdimm(dev); 1588 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1589 1590 if (nfit_mem->family < 0) 1591 return -ENXIO; 1592 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); 1593 } 1594 static DEVICE_ATTR_RO(dsm_mask); 1595 1596 static ssize_t flags_show(struct device *dev, 1597 struct device_attribute *attr, char *buf) 1598 { 1599 struct nvdimm *nvdimm = to_nvdimm(dev); 1600 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1601 u16 flags = __to_nfit_memdev(nfit_mem)->flags; 1602 1603 if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags)) 1604 flags |= ACPI_NFIT_MEM_FLUSH_FAILED; 1605 1606 return sprintf(buf, "%s%s%s%s%s%s%s\n", 1607 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 1608 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 1609 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 1610 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 1611 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", 1612 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", 1613 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : ""); 1614 } 1615 static DEVICE_ATTR_RO(flags); 1616 1617 static ssize_t id_show(struct device *dev, 1618 struct device_attribute *attr, char *buf) 1619 { 1620 struct nvdimm *nvdimm = to_nvdimm(dev); 1621 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1622 1623 return sprintf(buf, "%s\n", nfit_mem->id); 1624 } 1625 static DEVICE_ATTR_RO(id); 1626 1627 static ssize_t dirty_shutdown_show(struct device *dev, 1628 struct device_attribute *attr, char *buf) 1629 { 1630 struct nvdimm *nvdimm = to_nvdimm(dev); 1631 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1632 1633 return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown); 1634 } 1635 static DEVICE_ATTR_RO(dirty_shutdown); 1636 1637 static struct attribute *acpi_nfit_dimm_attributes[] = { 1638 &dev_attr_handle.attr, 1639 &dev_attr_phys_id.attr, 1640 &dev_attr_vendor.attr, 1641 &dev_attr_device.attr, 1642 &dev_attr_rev_id.attr, 1643 &dev_attr_subsystem_vendor.attr, 1644 &dev_attr_subsystem_device.attr, 1645 &dev_attr_subsystem_rev_id.attr, 1646 &dev_attr_format.attr, 1647 &dev_attr_formats.attr, 1648 &dev_attr_format1.attr, 1649 &dev_attr_serial.attr, 1650 &dev_attr_flags.attr, 1651 &dev_attr_id.attr, 1652 &dev_attr_family.attr, 1653 &dev_attr_dsm_mask.attr, 1654 &dev_attr_dirty_shutdown.attr, 1655 NULL, 1656 }; 1657 1658 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 1659 struct attribute *a, int n) 1660 { 1661 struct device *dev = container_of(kobj, struct device, kobj); 1662 struct nvdimm *nvdimm = to_nvdimm(dev); 1663 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1664 1665 if (!to_nfit_dcr(dev)) { 1666 /* Without a dcr only the memdev attributes can be surfaced */ 1667 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr 1668 || a == &dev_attr_flags.attr 1669 || a == &dev_attr_family.attr 1670 || a == &dev_attr_dsm_mask.attr) 1671 return a->mode; 1672 return 0; 1673 } 1674 1675 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) 1676 return 0; 1677 1678 if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags) 1679 && a == &dev_attr_dirty_shutdown.attr) 1680 return 0; 1681 1682 return a->mode; 1683 } 1684 1685 static const struct attribute_group acpi_nfit_dimm_attribute_group = { 1686 .name = "nfit", 1687 .attrs = acpi_nfit_dimm_attributes, 1688 .is_visible = acpi_nfit_dimm_attr_visible, 1689 }; 1690 1691 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 1692 &nvdimm_attribute_group, 1693 &nd_device_attribute_group, 1694 &acpi_nfit_dimm_attribute_group, 1695 NULL, 1696 }; 1697 1698 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 1699 u32 device_handle) 1700 { 1701 struct nfit_mem *nfit_mem; 1702 1703 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1704 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 1705 return nfit_mem->nvdimm; 1706 1707 return NULL; 1708 } 1709 1710 void __acpi_nvdimm_notify(struct device *dev, u32 event) 1711 { 1712 struct nfit_mem *nfit_mem; 1713 struct acpi_nfit_desc *acpi_desc; 1714 1715 dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev), 1716 event); 1717 1718 if (event != NFIT_NOTIFY_DIMM_HEALTH) { 1719 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), 1720 event); 1721 return; 1722 } 1723 1724 acpi_desc = dev_get_drvdata(dev->parent); 1725 if (!acpi_desc) 1726 return; 1727 1728 /* 1729 * If we successfully retrieved acpi_desc, then we know nfit_mem data 1730 * is still valid. 1731 */ 1732 nfit_mem = dev_get_drvdata(dev); 1733 if (nfit_mem && nfit_mem->flags_attr) 1734 sysfs_notify_dirent(nfit_mem->flags_attr); 1735 } 1736 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); 1737 1738 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) 1739 { 1740 struct acpi_device *adev = data; 1741 struct device *dev = &adev->dev; 1742 1743 device_lock(dev->parent); 1744 __acpi_nvdimm_notify(dev, event); 1745 device_unlock(dev->parent); 1746 } 1747 1748 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) 1749 { 1750 acpi_handle handle; 1751 acpi_status status; 1752 1753 status = acpi_get_handle(adev->handle, method, &handle); 1754 1755 if (ACPI_SUCCESS(status)) 1756 return true; 1757 return false; 1758 } 1759 1760 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) 1761 { 1762 struct nd_intel_smart smart = { 0 }; 1763 union acpi_object in_buf = { 1764 .type = ACPI_TYPE_BUFFER, 1765 .buffer.pointer = (char *) &smart, 1766 .buffer.length = sizeof(smart), 1767 }; 1768 union acpi_object in_obj = { 1769 .type = ACPI_TYPE_PACKAGE, 1770 .package.count = 1, 1771 .package.elements = &in_buf, 1772 }; 1773 const u8 func = ND_INTEL_SMART; 1774 const guid_t *guid = to_nfit_uuid(nfit_mem->family); 1775 u8 revid = nfit_dsm_revid(nfit_mem->family, func); 1776 struct acpi_device *adev = nfit_mem->adev; 1777 acpi_handle handle = adev->handle; 1778 union acpi_object *out_obj; 1779 1780 if ((nfit_mem->dsm_mask & (1 << func)) == 0) 1781 return; 1782 1783 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); 1784 if (!out_obj) 1785 return; 1786 1787 if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { 1788 if (smart.shutdown_state) 1789 set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags); 1790 } 1791 1792 if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) { 1793 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags); 1794 nfit_mem->dirty_shutdown = smart.shutdown_count; 1795 } 1796 ACPI_FREE(out_obj); 1797 } 1798 1799 static void populate_shutdown_status(struct nfit_mem *nfit_mem) 1800 { 1801 /* 1802 * For DIMMs that provide a dynamic facility to retrieve a 1803 * dirty-shutdown status and/or a dirty-shutdown count, cache 1804 * these values in nfit_mem. 1805 */ 1806 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 1807 nfit_intel_shutdown_status(nfit_mem); 1808 } 1809 1810 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1811 struct nfit_mem *nfit_mem, u32 device_handle) 1812 { 1813 struct acpi_device *adev, *adev_dimm; 1814 struct device *dev = acpi_desc->dev; 1815 unsigned long dsm_mask, label_mask; 1816 const guid_t *guid; 1817 int i; 1818 int family = -1; 1819 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 1820 1821 /* nfit test assumes 1:1 relationship between commands and dsms */ 1822 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; 1823 nfit_mem->family = NVDIMM_FAMILY_INTEL; 1824 1825 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) 1826 sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", 1827 be16_to_cpu(dcr->vendor_id), 1828 dcr->manufacturing_location, 1829 be16_to_cpu(dcr->manufacturing_date), 1830 be32_to_cpu(dcr->serial_number)); 1831 else 1832 sprintf(nfit_mem->id, "%04x-%08x", 1833 be16_to_cpu(dcr->vendor_id), 1834 be32_to_cpu(dcr->serial_number)); 1835 1836 adev = to_acpi_dev(acpi_desc); 1837 if (!adev) { 1838 /* unit test case */ 1839 populate_shutdown_status(nfit_mem); 1840 return 0; 1841 } 1842 1843 adev_dimm = acpi_find_child_device(adev, device_handle, false); 1844 nfit_mem->adev = adev_dimm; 1845 if (!adev_dimm) { 1846 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 1847 device_handle); 1848 return force_enable_dimms ? 0 : -ENODEV; 1849 } 1850 1851 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, 1852 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { 1853 dev_err(dev, "%s: notification registration failed\n", 1854 dev_name(&adev_dimm->dev)); 1855 return -ENXIO; 1856 } 1857 /* 1858 * Record nfit_mem for the notification path to track back to 1859 * the nfit sysfs attributes for this dimm device object. 1860 */ 1861 dev_set_drvdata(&adev_dimm->dev, nfit_mem); 1862 1863 /* 1864 * Until standardization materializes we need to consider 4 1865 * different command sets. Note, that checking for function0 (bit0) 1866 * tells us if any commands are reachable through this GUID. 1867 */ 1868 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) 1869 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1870 if (family < 0 || i == default_dsm_family) 1871 family = i; 1872 1873 /* limit the supported commands to those that are publicly documented */ 1874 nfit_mem->family = family; 1875 if (override_dsm_mask && !disable_vendor_specific) 1876 dsm_mask = override_dsm_mask; 1877 else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1878 dsm_mask = NVDIMM_INTEL_CMDMASK; 1879 if (disable_vendor_specific) 1880 dsm_mask &= ~(1 << ND_CMD_VENDOR); 1881 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { 1882 dsm_mask = 0x1c3c76; 1883 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { 1884 dsm_mask = 0x1fe; 1885 if (disable_vendor_specific) 1886 dsm_mask &= ~(1 << 8); 1887 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { 1888 dsm_mask = 0xffffffff; 1889 } else { 1890 dev_dbg(dev, "unknown dimm command family\n"); 1891 nfit_mem->family = -1; 1892 /* DSMs are optional, continue loading the driver... */ 1893 return 0; 1894 } 1895 1896 /* 1897 * Function 0 is the command interrogation function, don't 1898 * export it to potential userspace use, and enable it to be 1899 * used as an error value in acpi_nfit_ctl(). 1900 */ 1901 dsm_mask &= ~1UL; 1902 1903 guid = to_nfit_uuid(nfit_mem->family); 1904 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1905 if (acpi_check_dsm(adev_dimm->handle, guid, 1906 nfit_dsm_revid(nfit_mem->family, i), 1907 1ULL << i)) 1908 set_bit(i, &nfit_mem->dsm_mask); 1909 1910 /* 1911 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present 1912 * due to their better semantics handling locked capacity. 1913 */ 1914 label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA 1915 | 1 << ND_CMD_SET_CONFIG_DATA; 1916 if (family == NVDIMM_FAMILY_INTEL 1917 && (dsm_mask & label_mask) == label_mask) 1918 return 0; 1919 1920 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1921 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1922 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1923 set_bit(NFIT_MEM_LSR, &nfit_mem->flags); 1924 } 1925 1926 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) 1927 && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { 1928 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); 1929 set_bit(NFIT_MEM_LSW, &nfit_mem->flags); 1930 } 1931 1932 populate_shutdown_status(nfit_mem); 1933 1934 return 0; 1935 } 1936 1937 static void shutdown_dimm_notify(void *data) 1938 { 1939 struct acpi_nfit_desc *acpi_desc = data; 1940 struct nfit_mem *nfit_mem; 1941 1942 mutex_lock(&acpi_desc->init_mutex); 1943 /* 1944 * Clear out the nfit_mem->flags_attr and shut down dimm event 1945 * notifications. 1946 */ 1947 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1948 struct acpi_device *adev_dimm = nfit_mem->adev; 1949 1950 if (nfit_mem->flags_attr) { 1951 sysfs_put(nfit_mem->flags_attr); 1952 nfit_mem->flags_attr = NULL; 1953 } 1954 if (adev_dimm) { 1955 acpi_remove_notify_handler(adev_dimm->handle, 1956 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); 1957 dev_set_drvdata(&adev_dimm->dev, NULL); 1958 } 1959 } 1960 mutex_unlock(&acpi_desc->init_mutex); 1961 } 1962 1963 static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) 1964 { 1965 switch (family) { 1966 case NVDIMM_FAMILY_INTEL: 1967 return intel_security_ops; 1968 default: 1969 return NULL; 1970 } 1971 } 1972 1973 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 1974 { 1975 struct nfit_mem *nfit_mem; 1976 int dimm_count = 0, rc; 1977 struct nvdimm *nvdimm; 1978 1979 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1980 struct acpi_nfit_flush_address *flush; 1981 unsigned long flags = 0, cmd_mask; 1982 struct nfit_memdev *nfit_memdev; 1983 u32 device_handle; 1984 u16 mem_flags; 1985 1986 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 1987 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 1988 if (nvdimm) { 1989 dimm_count++; 1990 continue; 1991 } 1992 1993 if (nfit_mem->bdw && nfit_mem->memdev_pmem) 1994 set_bit(NDD_ALIASING, &flags); 1995 1996 /* collate flags across all memdevs for this dimm */ 1997 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1998 struct acpi_nfit_memory_map *dimm_memdev; 1999 2000 dimm_memdev = __to_nfit_memdev(nfit_mem); 2001 if (dimm_memdev->device_handle 2002 != nfit_memdev->memdev->device_handle) 2003 continue; 2004 dimm_memdev->flags |= nfit_memdev->memdev->flags; 2005 } 2006 2007 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 2008 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 2009 set_bit(NDD_UNARMED, &flags); 2010 2011 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 2012 if (rc) 2013 continue; 2014 2015 /* 2016 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL 2017 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the 2018 * userspace interface. 2019 */ 2020 cmd_mask = 1UL << ND_CMD_CALL; 2021 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 2022 /* 2023 * These commands have a 1:1 correspondence 2024 * between DSM payload and libnvdimm ioctl 2025 * payload format. 2026 */ 2027 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 2028 } 2029 2030 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { 2031 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 2032 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); 2033 } 2034 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) 2035 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); 2036 2037 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush 2038 : NULL; 2039 nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 2040 acpi_nfit_dimm_attribute_groups, 2041 flags, cmd_mask, flush ? flush->hint_count : 0, 2042 nfit_mem->flush_wpq, &nfit_mem->id[0], 2043 acpi_nfit_get_security_ops(nfit_mem->family)); 2044 if (!nvdimm) 2045 return -ENOMEM; 2046 2047 nfit_mem->nvdimm = nvdimm; 2048 dimm_count++; 2049 2050 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 2051 continue; 2052 2053 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", 2054 nvdimm_name(nvdimm), 2055 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 2056 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 2057 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 2058 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", 2059 mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : ""); 2060 2061 } 2062 2063 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 2064 if (rc) 2065 return rc; 2066 2067 /* 2068 * Now that dimms are successfully registered, and async registration 2069 * is flushed, attempt to enable event notification. 2070 */ 2071 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 2072 struct kernfs_node *nfit_kernfs; 2073 2074 nvdimm = nfit_mem->nvdimm; 2075 if (!nvdimm) 2076 continue; 2077 2078 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); 2079 if (nfit_kernfs) 2080 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, 2081 "flags"); 2082 sysfs_put(nfit_kernfs); 2083 if (!nfit_mem->flags_attr) 2084 dev_warn(acpi_desc->dev, "%s: notifications disabled\n", 2085 nvdimm_name(nvdimm)); 2086 } 2087 2088 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, 2089 acpi_desc); 2090 } 2091 2092 /* 2093 * These constants are private because there are no kernel consumers of 2094 * these commands. 2095 */ 2096 enum nfit_aux_cmds { 2097 NFIT_CMD_TRANSLATE_SPA = 5, 2098 NFIT_CMD_ARS_INJECT_SET = 7, 2099 NFIT_CMD_ARS_INJECT_CLEAR = 8, 2100 NFIT_CMD_ARS_INJECT_GET = 9, 2101 }; 2102 2103 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 2104 { 2105 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2106 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); 2107 struct acpi_device *adev; 2108 unsigned long dsm_mask; 2109 int i; 2110 2111 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 2112 nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; 2113 adev = to_acpi_dev(acpi_desc); 2114 if (!adev) 2115 return; 2116 2117 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 2118 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 2119 set_bit(i, &nd_desc->cmd_mask); 2120 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); 2121 2122 dsm_mask = 2123 (1 << ND_CMD_ARS_CAP) | 2124 (1 << ND_CMD_ARS_START) | 2125 (1 << ND_CMD_ARS_STATUS) | 2126 (1 << ND_CMD_CLEAR_ERROR) | 2127 (1 << NFIT_CMD_TRANSLATE_SPA) | 2128 (1 << NFIT_CMD_ARS_INJECT_SET) | 2129 (1 << NFIT_CMD_ARS_INJECT_CLEAR) | 2130 (1 << NFIT_CMD_ARS_INJECT_GET); 2131 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 2132 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 2133 set_bit(i, &nd_desc->bus_dsm_mask); 2134 } 2135 2136 static ssize_t range_index_show(struct device *dev, 2137 struct device_attribute *attr, char *buf) 2138 { 2139 struct nd_region *nd_region = to_nd_region(dev); 2140 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 2141 2142 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 2143 } 2144 static DEVICE_ATTR_RO(range_index); 2145 2146 static struct attribute *acpi_nfit_region_attributes[] = { 2147 &dev_attr_range_index.attr, 2148 NULL, 2149 }; 2150 2151 static const struct attribute_group acpi_nfit_region_attribute_group = { 2152 .name = "nfit", 2153 .attrs = acpi_nfit_region_attributes, 2154 }; 2155 2156 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 2157 &nd_region_attribute_group, 2158 &nd_mapping_attribute_group, 2159 &nd_device_attribute_group, 2160 &nd_numa_attribute_group, 2161 &acpi_nfit_region_attribute_group, 2162 NULL, 2163 }; 2164 2165 /* enough info to uniquely specify an interleave set */ 2166 struct nfit_set_info { 2167 struct nfit_set_info_map { 2168 u64 region_offset; 2169 u32 serial_number; 2170 u32 pad; 2171 } mapping[0]; 2172 }; 2173 2174 struct nfit_set_info2 { 2175 struct nfit_set_info_map2 { 2176 u64 region_offset; 2177 u32 serial_number; 2178 u16 vendor_id; 2179 u16 manufacturing_date; 2180 u8 manufacturing_location; 2181 u8 reserved[31]; 2182 } mapping[0]; 2183 }; 2184 2185 static size_t sizeof_nfit_set_info(int num_mappings) 2186 { 2187 return sizeof(struct nfit_set_info) 2188 + num_mappings * sizeof(struct nfit_set_info_map); 2189 } 2190 2191 static size_t sizeof_nfit_set_info2(int num_mappings) 2192 { 2193 return sizeof(struct nfit_set_info2) 2194 + num_mappings * sizeof(struct nfit_set_info_map2); 2195 } 2196 2197 static int cmp_map_compat(const void *m0, const void *m1) 2198 { 2199 const struct nfit_set_info_map *map0 = m0; 2200 const struct nfit_set_info_map *map1 = m1; 2201 2202 return memcmp(&map0->region_offset, &map1->region_offset, 2203 sizeof(u64)); 2204 } 2205 2206 static int cmp_map(const void *m0, const void *m1) 2207 { 2208 const struct nfit_set_info_map *map0 = m0; 2209 const struct nfit_set_info_map *map1 = m1; 2210 2211 if (map0->region_offset < map1->region_offset) 2212 return -1; 2213 else if (map0->region_offset > map1->region_offset) 2214 return 1; 2215 return 0; 2216 } 2217 2218 static int cmp_map2(const void *m0, const void *m1) 2219 { 2220 const struct nfit_set_info_map2 *map0 = m0; 2221 const struct nfit_set_info_map2 *map1 = m1; 2222 2223 if (map0->region_offset < map1->region_offset) 2224 return -1; 2225 else if (map0->region_offset > map1->region_offset) 2226 return 1; 2227 return 0; 2228 } 2229 2230 /* Retrieve the nth entry referencing this spa */ 2231 static struct acpi_nfit_memory_map *memdev_from_spa( 2232 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2233 { 2234 struct nfit_memdev *nfit_memdev; 2235 2236 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2237 if (nfit_memdev->memdev->range_index == range_index) 2238 if (n-- == 0) 2239 return nfit_memdev->memdev; 2240 return NULL; 2241 } 2242 2243 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2244 struct nd_region_desc *ndr_desc, 2245 struct acpi_nfit_system_address *spa) 2246 { 2247 struct device *dev = acpi_desc->dev; 2248 struct nd_interleave_set *nd_set; 2249 u16 nr = ndr_desc->num_mappings; 2250 struct nfit_set_info2 *info2; 2251 struct nfit_set_info *info; 2252 int i; 2253 2254 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2255 if (!nd_set) 2256 return -ENOMEM; 2257 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2258 2259 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2260 if (!info) 2261 return -ENOMEM; 2262 2263 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); 2264 if (!info2) 2265 return -ENOMEM; 2266 2267 for (i = 0; i < nr; i++) { 2268 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2269 struct nfit_set_info_map *map = &info->mapping[i]; 2270 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2271 struct nvdimm *nvdimm = mapping->nvdimm; 2272 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2273 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 2274 spa->range_index, i); 2275 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2276 2277 if (!memdev || !nfit_mem->dcr) { 2278 dev_err(dev, "%s: failed to find DCR\n", __func__); 2279 return -ENODEV; 2280 } 2281 2282 map->region_offset = memdev->region_offset; 2283 map->serial_number = dcr->serial_number; 2284 2285 map2->region_offset = memdev->region_offset; 2286 map2->serial_number = dcr->serial_number; 2287 map2->vendor_id = dcr->vendor_id; 2288 map2->manufacturing_date = dcr->manufacturing_date; 2289 map2->manufacturing_location = dcr->manufacturing_location; 2290 } 2291 2292 /* v1.1 namespaces */ 2293 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2294 cmp_map, NULL); 2295 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2296 2297 /* v1.2 namespaces */ 2298 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), 2299 cmp_map2, NULL); 2300 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); 2301 2302 /* support v1.1 namespaces created with the wrong sort order */ 2303 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2304 cmp_map_compat, NULL); 2305 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2306 2307 /* record the result of the sort for the mapping position */ 2308 for (i = 0; i < nr; i++) { 2309 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2310 int j; 2311 2312 for (j = 0; j < nr; j++) { 2313 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2314 struct nvdimm *nvdimm = mapping->nvdimm; 2315 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2316 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2317 2318 if (map2->serial_number == dcr->serial_number && 2319 map2->vendor_id == dcr->vendor_id && 2320 map2->manufacturing_date == dcr->manufacturing_date && 2321 map2->manufacturing_location 2322 == dcr->manufacturing_location) { 2323 mapping->position = i; 2324 break; 2325 } 2326 } 2327 } 2328 2329 ndr_desc->nd_set = nd_set; 2330 devm_kfree(dev, info); 2331 devm_kfree(dev, info2); 2332 2333 return 0; 2334 } 2335 2336 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2337 { 2338 struct acpi_nfit_interleave *idt = mmio->idt; 2339 u32 sub_line_offset, line_index, line_offset; 2340 u64 line_no, table_skip_count, table_offset; 2341 2342 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2343 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2344 line_offset = idt->line_offset[line_index] 2345 * mmio->line_size; 2346 table_offset = table_skip_count * mmio->table_size; 2347 2348 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2349 } 2350 2351 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2352 { 2353 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2354 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2355 const u32 STATUS_MASK = 0x80000037; 2356 2357 if (mmio->num_lines) 2358 offset = to_interleave_offset(offset, mmio); 2359 2360 return readl(mmio->addr.base + offset) & STATUS_MASK; 2361 } 2362 2363 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2364 resource_size_t dpa, unsigned int len, unsigned int write) 2365 { 2366 u64 cmd, offset; 2367 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2368 2369 enum { 2370 BCW_OFFSET_MASK = (1ULL << 48)-1, 2371 BCW_LEN_SHIFT = 48, 2372 BCW_LEN_MASK = (1ULL << 8) - 1, 2373 BCW_CMD_SHIFT = 56, 2374 }; 2375 2376 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2377 len = len >> L1_CACHE_SHIFT; 2378 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2379 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2380 2381 offset = nfit_blk->cmd_offset + mmio->size * bw; 2382 if (mmio->num_lines) 2383 offset = to_interleave_offset(offset, mmio); 2384 2385 writeq(cmd, mmio->addr.base + offset); 2386 nvdimm_flush(nfit_blk->nd_region); 2387 2388 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2389 readq(mmio->addr.base + offset); 2390 } 2391 2392 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2393 resource_size_t dpa, void *iobuf, size_t len, int rw, 2394 unsigned int lane) 2395 { 2396 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2397 unsigned int copied = 0; 2398 u64 base_offset; 2399 int rc; 2400 2401 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2402 + lane * mmio->size; 2403 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2404 while (len) { 2405 unsigned int c; 2406 u64 offset; 2407 2408 if (mmio->num_lines) { 2409 u32 line_offset; 2410 2411 offset = to_interleave_offset(base_offset + copied, 2412 mmio); 2413 div_u64_rem(offset, mmio->line_size, &line_offset); 2414 c = min_t(size_t, len, mmio->line_size - line_offset); 2415 } else { 2416 offset = base_offset + nfit_blk->bdw_offset; 2417 c = len; 2418 } 2419 2420 if (rw) 2421 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2422 else { 2423 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2424 arch_invalidate_pmem((void __force *) 2425 mmio->addr.aperture + offset, c); 2426 2427 memcpy(iobuf + copied, mmio->addr.aperture + offset, c); 2428 } 2429 2430 copied += c; 2431 len -= c; 2432 } 2433 2434 if (rw) 2435 nvdimm_flush(nfit_blk->nd_region); 2436 2437 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2438 return rc; 2439 } 2440 2441 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2442 resource_size_t dpa, void *iobuf, u64 len, int rw) 2443 { 2444 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2445 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2446 struct nd_region *nd_region = nfit_blk->nd_region; 2447 unsigned int lane, copied = 0; 2448 int rc = 0; 2449 2450 lane = nd_region_acquire_lane(nd_region); 2451 while (len) { 2452 u64 c = min(len, mmio->size); 2453 2454 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2455 iobuf + copied, c, rw, lane); 2456 if (rc) 2457 break; 2458 2459 copied += c; 2460 len -= c; 2461 } 2462 nd_region_release_lane(nd_region, lane); 2463 2464 return rc; 2465 } 2466 2467 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2468 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2469 { 2470 if (idt) { 2471 mmio->num_lines = idt->line_count; 2472 mmio->line_size = idt->line_size; 2473 if (interleave_ways == 0) 2474 return -ENXIO; 2475 mmio->table_size = mmio->num_lines * interleave_ways 2476 * mmio->line_size; 2477 } 2478 2479 return 0; 2480 } 2481 2482 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2483 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2484 { 2485 struct nd_cmd_dimm_flags flags; 2486 int rc; 2487 2488 memset(&flags, 0, sizeof(flags)); 2489 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2490 sizeof(flags), NULL); 2491 2492 if (rc >= 0 && flags.status == 0) 2493 nfit_blk->dimm_flags = flags.flags; 2494 else if (rc == -ENOTTY) { 2495 /* fall back to a conservative default */ 2496 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2497 rc = 0; 2498 } else 2499 rc = -ENXIO; 2500 2501 return rc; 2502 } 2503 2504 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2505 struct device *dev) 2506 { 2507 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2508 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2509 struct nfit_blk_mmio *mmio; 2510 struct nfit_blk *nfit_blk; 2511 struct nfit_mem *nfit_mem; 2512 struct nvdimm *nvdimm; 2513 int rc; 2514 2515 nvdimm = nd_blk_region_to_dimm(ndbr); 2516 nfit_mem = nvdimm_provider_data(nvdimm); 2517 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2518 dev_dbg(dev, "missing%s%s%s\n", 2519 nfit_mem ? "" : " nfit_mem", 2520 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2521 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 2522 return -ENXIO; 2523 } 2524 2525 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2526 if (!nfit_blk) 2527 return -ENOMEM; 2528 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2529 nfit_blk->nd_region = to_nd_region(dev); 2530 2531 /* map block aperture memory */ 2532 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2533 mmio = &nfit_blk->mmio[BDW]; 2534 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2535 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2536 if (!mmio->addr.base) { 2537 dev_dbg(dev, "%s failed to map bdw\n", 2538 nvdimm_name(nvdimm)); 2539 return -ENOMEM; 2540 } 2541 mmio->size = nfit_mem->bdw->size; 2542 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2543 mmio->idt = nfit_mem->idt_bdw; 2544 mmio->spa = nfit_mem->spa_bdw; 2545 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2546 nfit_mem->memdev_bdw->interleave_ways); 2547 if (rc) { 2548 dev_dbg(dev, "%s failed to init bdw interleave\n", 2549 nvdimm_name(nvdimm)); 2550 return rc; 2551 } 2552 2553 /* map block control memory */ 2554 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2555 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2556 mmio = &nfit_blk->mmio[DCR]; 2557 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2558 nfit_mem->spa_dcr->length); 2559 if (!mmio->addr.base) { 2560 dev_dbg(dev, "%s failed to map dcr\n", 2561 nvdimm_name(nvdimm)); 2562 return -ENOMEM; 2563 } 2564 mmio->size = nfit_mem->dcr->window_size; 2565 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2566 mmio->idt = nfit_mem->idt_dcr; 2567 mmio->spa = nfit_mem->spa_dcr; 2568 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2569 nfit_mem->memdev_dcr->interleave_ways); 2570 if (rc) { 2571 dev_dbg(dev, "%s failed to init dcr interleave\n", 2572 nvdimm_name(nvdimm)); 2573 return rc; 2574 } 2575 2576 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2577 if (rc < 0) { 2578 dev_dbg(dev, "%s failed get DIMM flags\n", 2579 nvdimm_name(nvdimm)); 2580 return rc; 2581 } 2582 2583 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2584 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2585 2586 if (mmio->line_size == 0) 2587 return 0; 2588 2589 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2590 + 8 > mmio->line_size) { 2591 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2592 return -ENXIO; 2593 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2594 + 8 > mmio->line_size) { 2595 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2596 return -ENXIO; 2597 } 2598 2599 return 0; 2600 } 2601 2602 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2603 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2604 { 2605 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2606 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2607 int cmd_rc, rc; 2608 2609 cmd->address = spa->address; 2610 cmd->length = spa->length; 2611 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2612 sizeof(*cmd), &cmd_rc); 2613 if (rc < 0) 2614 return rc; 2615 return cmd_rc; 2616 } 2617 2618 static int ars_start(struct acpi_nfit_desc *acpi_desc, 2619 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) 2620 { 2621 int rc; 2622 int cmd_rc; 2623 struct nd_cmd_ars_start ars_start; 2624 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2625 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2626 2627 memset(&ars_start, 0, sizeof(ars_start)); 2628 ars_start.address = spa->address; 2629 ars_start.length = spa->length; 2630 if (req_type == ARS_REQ_SHORT) 2631 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2632 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2633 ars_start.type = ND_ARS_PERSISTENT; 2634 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2635 ars_start.type = ND_ARS_VOLATILE; 2636 else 2637 return -ENOTTY; 2638 2639 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2640 sizeof(ars_start), &cmd_rc); 2641 2642 if (rc < 0) 2643 return rc; 2644 return cmd_rc; 2645 } 2646 2647 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2648 { 2649 int rc, cmd_rc; 2650 struct nd_cmd_ars_start ars_start; 2651 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2652 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2653 2654 memset(&ars_start, 0, sizeof(ars_start)); 2655 ars_start.address = ars_status->restart_address; 2656 ars_start.length = ars_status->restart_length; 2657 ars_start.type = ars_status->type; 2658 ars_start.flags = acpi_desc->ars_start_flags; 2659 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2660 sizeof(ars_start), &cmd_rc); 2661 if (rc < 0) 2662 return rc; 2663 return cmd_rc; 2664 } 2665 2666 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2667 { 2668 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2669 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2670 int rc, cmd_rc; 2671 2672 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2673 acpi_desc->max_ars, &cmd_rc); 2674 if (rc < 0) 2675 return rc; 2676 return cmd_rc; 2677 } 2678 2679 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2680 struct nfit_spa *nfit_spa) 2681 { 2682 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2683 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2684 struct nd_region *nd_region = nfit_spa->nd_region; 2685 struct device *dev; 2686 2687 lockdep_assert_held(&acpi_desc->init_mutex); 2688 /* 2689 * Only advance the ARS state for ARS runs initiated by the 2690 * kernel, ignore ARS results from BIOS initiated runs for scrub 2691 * completion tracking. 2692 */ 2693 if (acpi_desc->scrub_spa != nfit_spa) 2694 return; 2695 2696 if ((ars_status->address >= spa->address && ars_status->address 2697 < spa->address + spa->length) 2698 || (ars_status->address < spa->address)) { 2699 /* 2700 * Assume that if a scrub starts at an offset from the 2701 * start of nfit_spa that we are in the continuation 2702 * case. 2703 * 2704 * Otherwise, if the scrub covers the spa range, mark 2705 * any pending request complete. 2706 */ 2707 if (ars_status->address + ars_status->length 2708 >= spa->address + spa->length) 2709 /* complete */; 2710 else 2711 return; 2712 } else 2713 return; 2714 2715 acpi_desc->scrub_spa = NULL; 2716 if (nd_region) { 2717 dev = nd_region_dev(nd_region); 2718 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2719 } else 2720 dev = acpi_desc->dev; 2721 dev_dbg(dev, "ARS: range %d complete\n", spa->range_index); 2722 } 2723 2724 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2725 { 2726 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2727 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2728 int rc; 2729 u32 i; 2730 2731 /* 2732 * First record starts at 44 byte offset from the start of the 2733 * payload. 2734 */ 2735 if (ars_status->out_length < 44) 2736 return 0; 2737 for (i = 0; i < ars_status->num_records; i++) { 2738 /* only process full records */ 2739 if (ars_status->out_length 2740 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2741 break; 2742 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2743 ars_status->records[i].err_address, 2744 ars_status->records[i].length); 2745 if (rc) 2746 return rc; 2747 } 2748 if (i < ars_status->num_records) 2749 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2750 2751 return 0; 2752 } 2753 2754 static void acpi_nfit_remove_resource(void *data) 2755 { 2756 struct resource *res = data; 2757 2758 remove_resource(res); 2759 } 2760 2761 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2762 struct nd_region_desc *ndr_desc) 2763 { 2764 struct resource *res, *nd_res = ndr_desc->res; 2765 int is_pmem, ret; 2766 2767 /* No operation if the region is already registered as PMEM */ 2768 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2769 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2770 if (is_pmem == REGION_INTERSECTS) 2771 return 0; 2772 2773 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2774 if (!res) 2775 return -ENOMEM; 2776 2777 res->name = "Persistent Memory"; 2778 res->start = nd_res->start; 2779 res->end = nd_res->end; 2780 res->flags = IORESOURCE_MEM; 2781 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2782 2783 ret = insert_resource(&iomem_resource, res); 2784 if (ret) 2785 return ret; 2786 2787 ret = devm_add_action_or_reset(acpi_desc->dev, 2788 acpi_nfit_remove_resource, 2789 res); 2790 if (ret) 2791 return ret; 2792 2793 return 0; 2794 } 2795 2796 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2797 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2798 struct acpi_nfit_memory_map *memdev, 2799 struct nfit_spa *nfit_spa) 2800 { 2801 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2802 memdev->device_handle); 2803 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2804 struct nd_blk_region_desc *ndbr_desc; 2805 struct nfit_mem *nfit_mem; 2806 int rc; 2807 2808 if (!nvdimm) { 2809 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2810 spa->range_index, memdev->device_handle); 2811 return -ENODEV; 2812 } 2813 2814 mapping->nvdimm = nvdimm; 2815 switch (nfit_spa_type(spa)) { 2816 case NFIT_SPA_PM: 2817 case NFIT_SPA_VOLATILE: 2818 mapping->start = memdev->address; 2819 mapping->size = memdev->region_size; 2820 break; 2821 case NFIT_SPA_DCR: 2822 nfit_mem = nvdimm_provider_data(nvdimm); 2823 if (!nfit_mem || !nfit_mem->bdw) { 2824 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2825 spa->range_index, nvdimm_name(nvdimm)); 2826 break; 2827 } 2828 2829 mapping->size = nfit_mem->bdw->capacity; 2830 mapping->start = nfit_mem->bdw->start_address; 2831 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2832 ndr_desc->mapping = mapping; 2833 ndr_desc->num_mappings = 1; 2834 ndbr_desc = to_blk_region_desc(ndr_desc); 2835 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2836 ndbr_desc->do_io = acpi_desc->blk_do_io; 2837 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2838 if (rc) 2839 return rc; 2840 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2841 ndr_desc); 2842 if (!nfit_spa->nd_region) 2843 return -ENOMEM; 2844 break; 2845 } 2846 2847 return 0; 2848 } 2849 2850 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2851 { 2852 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2853 nfit_spa_type(spa) == NFIT_SPA_VCD || 2854 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2855 nfit_spa_type(spa) == NFIT_SPA_PCD); 2856 } 2857 2858 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2859 { 2860 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2861 nfit_spa_type(spa) == NFIT_SPA_VCD || 2862 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2863 } 2864 2865 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2866 struct nfit_spa *nfit_spa) 2867 { 2868 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2869 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2870 struct nd_blk_region_desc ndbr_desc; 2871 struct nd_region_desc *ndr_desc; 2872 struct nfit_memdev *nfit_memdev; 2873 struct nvdimm_bus *nvdimm_bus; 2874 struct resource res; 2875 int count = 0, rc; 2876 2877 if (nfit_spa->nd_region) 2878 return 0; 2879 2880 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2881 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2882 return 0; 2883 } 2884 2885 memset(&res, 0, sizeof(res)); 2886 memset(&mappings, 0, sizeof(mappings)); 2887 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2888 res.start = spa->address; 2889 res.end = res.start + spa->length - 1; 2890 ndr_desc = &ndbr_desc.ndr_desc; 2891 ndr_desc->res = &res; 2892 ndr_desc->provider_data = nfit_spa; 2893 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2894 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2895 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2896 spa->proximity_domain); 2897 else 2898 ndr_desc->numa_node = NUMA_NO_NODE; 2899 2900 /* 2901 * Persistence domain bits are hierarchical, if 2902 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 2903 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 2904 */ 2905 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2906 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2907 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 2908 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2909 2910 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2911 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2912 struct nd_mapping_desc *mapping; 2913 2914 if (memdev->range_index != spa->range_index) 2915 continue; 2916 if (count >= ND_MAX_MAPPINGS) { 2917 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2918 spa->range_index, ND_MAX_MAPPINGS); 2919 return -ENXIO; 2920 } 2921 mapping = &mappings[count++]; 2922 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2923 memdev, nfit_spa); 2924 if (rc) 2925 goto out; 2926 } 2927 2928 ndr_desc->mapping = mappings; 2929 ndr_desc->num_mappings = count; 2930 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2931 if (rc) 2932 goto out; 2933 2934 nvdimm_bus = acpi_desc->nvdimm_bus; 2935 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2936 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2937 if (rc) { 2938 dev_warn(acpi_desc->dev, 2939 "failed to insert pmem resource to iomem: %d\n", 2940 rc); 2941 goto out; 2942 } 2943 2944 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2945 ndr_desc); 2946 if (!nfit_spa->nd_region) 2947 rc = -ENOMEM; 2948 } else if (nfit_spa_is_volatile(spa)) { 2949 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2950 ndr_desc); 2951 if (!nfit_spa->nd_region) 2952 rc = -ENOMEM; 2953 } else if (nfit_spa_is_virtual(spa)) { 2954 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2955 ndr_desc); 2956 if (!nfit_spa->nd_region) 2957 rc = -ENOMEM; 2958 } 2959 2960 out: 2961 if (rc) 2962 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2963 nfit_spa->spa->range_index); 2964 return rc; 2965 } 2966 2967 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 2968 { 2969 struct device *dev = acpi_desc->dev; 2970 struct nd_cmd_ars_status *ars_status; 2971 2972 if (acpi_desc->ars_status) { 2973 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2974 return 0; 2975 } 2976 2977 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 2978 if (!ars_status) 2979 return -ENOMEM; 2980 acpi_desc->ars_status = ars_status; 2981 return 0; 2982 } 2983 2984 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 2985 { 2986 int rc; 2987 2988 if (ars_status_alloc(acpi_desc)) 2989 return -ENOMEM; 2990 2991 rc = ars_get_status(acpi_desc); 2992 2993 if (rc < 0 && rc != -ENOSPC) 2994 return rc; 2995 2996 if (ars_status_process_records(acpi_desc)) 2997 dev_err(acpi_desc->dev, "Failed to process ARS records\n"); 2998 2999 return rc; 3000 } 3001 3002 static int ars_register(struct acpi_nfit_desc *acpi_desc, 3003 struct nfit_spa *nfit_spa) 3004 { 3005 int rc; 3006 3007 if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3008 return acpi_nfit_register_region(acpi_desc, nfit_spa); 3009 3010 set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 3011 set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); 3012 3013 switch (acpi_nfit_query_poison(acpi_desc)) { 3014 case 0: 3015 case -EAGAIN: 3016 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); 3017 /* shouldn't happen, try again later */ 3018 if (rc == -EBUSY) 3019 break; 3020 if (rc) { 3021 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3022 break; 3023 } 3024 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 3025 rc = acpi_nfit_query_poison(acpi_desc); 3026 if (rc) 3027 break; 3028 acpi_desc->scrub_spa = nfit_spa; 3029 ars_complete(acpi_desc, nfit_spa); 3030 /* 3031 * If ars_complete() says we didn't complete the 3032 * short scrub, we'll try again with a long 3033 * request. 3034 */ 3035 acpi_desc->scrub_spa = NULL; 3036 break; 3037 case -EBUSY: 3038 case -ENOMEM: 3039 case -ENOSPC: 3040 /* 3041 * BIOS was using ARS, wait for it to complete (or 3042 * resources to become available) and then perform our 3043 * own scrubs. 3044 */ 3045 break; 3046 default: 3047 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3048 break; 3049 } 3050 3051 return acpi_nfit_register_region(acpi_desc, nfit_spa); 3052 } 3053 3054 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 3055 { 3056 struct nfit_spa *nfit_spa; 3057 3058 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3059 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3060 continue; 3061 ars_complete(acpi_desc, nfit_spa); 3062 } 3063 } 3064 3065 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 3066 int query_rc) 3067 { 3068 unsigned int tmo = acpi_desc->scrub_tmo; 3069 struct device *dev = acpi_desc->dev; 3070 struct nfit_spa *nfit_spa; 3071 3072 lockdep_assert_held(&acpi_desc->init_mutex); 3073 3074 if (acpi_desc->cancel) 3075 return 0; 3076 3077 if (query_rc == -EBUSY) { 3078 dev_dbg(dev, "ARS: ARS busy\n"); 3079 return min(30U * 60U, tmo * 2); 3080 } 3081 if (query_rc == -ENOSPC) { 3082 dev_dbg(dev, "ARS: ARS continue\n"); 3083 ars_continue(acpi_desc); 3084 return 1; 3085 } 3086 if (query_rc && query_rc != -EAGAIN) { 3087 unsigned long long addr, end; 3088 3089 addr = acpi_desc->ars_status->address; 3090 end = addr + acpi_desc->ars_status->length; 3091 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 3092 query_rc); 3093 } 3094 3095 ars_complete_all(acpi_desc); 3096 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3097 enum nfit_ars_state req_type; 3098 int rc; 3099 3100 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3101 continue; 3102 3103 /* prefer short ARS requests first */ 3104 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) 3105 req_type = ARS_REQ_SHORT; 3106 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) 3107 req_type = ARS_REQ_LONG; 3108 else 3109 continue; 3110 rc = ars_start(acpi_desc, nfit_spa, req_type); 3111 3112 dev = nd_region_dev(nfit_spa->nd_region); 3113 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n", 3114 nfit_spa->spa->range_index, 3115 req_type == ARS_REQ_SHORT ? "short" : "long", 3116 rc); 3117 /* 3118 * Hmm, we raced someone else starting ARS? Try again in 3119 * a bit. 3120 */ 3121 if (rc == -EBUSY) 3122 return 1; 3123 if (rc == 0) { 3124 dev_WARN_ONCE(dev, acpi_desc->scrub_spa, 3125 "scrub start while range %d active\n", 3126 acpi_desc->scrub_spa->spa->range_index); 3127 clear_bit(req_type, &nfit_spa->ars_state); 3128 acpi_desc->scrub_spa = nfit_spa; 3129 /* 3130 * Consider this spa last for future scrub 3131 * requests 3132 */ 3133 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 3134 return 1; 3135 } 3136 3137 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 3138 nfit_spa->spa->range_index, rc); 3139 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3140 } 3141 return 0; 3142 } 3143 3144 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) 3145 { 3146 lockdep_assert_held(&acpi_desc->init_mutex); 3147 3148 acpi_desc->scrub_busy = 1; 3149 /* note this should only be set from within the workqueue */ 3150 if (tmo) 3151 acpi_desc->scrub_tmo = tmo; 3152 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 3153 } 3154 3155 static void sched_ars(struct acpi_nfit_desc *acpi_desc) 3156 { 3157 __sched_ars(acpi_desc, 0); 3158 } 3159 3160 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) 3161 { 3162 lockdep_assert_held(&acpi_desc->init_mutex); 3163 3164 acpi_desc->scrub_busy = 0; 3165 acpi_desc->scrub_count++; 3166 if (acpi_desc->scrub_count_state) 3167 sysfs_notify_dirent(acpi_desc->scrub_count_state); 3168 } 3169 3170 static void acpi_nfit_scrub(struct work_struct *work) 3171 { 3172 struct acpi_nfit_desc *acpi_desc; 3173 unsigned int tmo; 3174 int query_rc; 3175 3176 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 3177 mutex_lock(&acpi_desc->init_mutex); 3178 query_rc = acpi_nfit_query_poison(acpi_desc); 3179 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 3180 if (tmo) 3181 __sched_ars(acpi_desc, tmo); 3182 else 3183 notify_ars_done(acpi_desc); 3184 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 3185 mutex_unlock(&acpi_desc->init_mutex); 3186 } 3187 3188 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 3189 struct nfit_spa *nfit_spa) 3190 { 3191 int type = nfit_spa_type(nfit_spa->spa); 3192 struct nd_cmd_ars_cap ars_cap; 3193 int rc; 3194 3195 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3196 memset(&ars_cap, 0, sizeof(ars_cap)); 3197 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 3198 if (rc < 0) 3199 return; 3200 /* check that the supported scrub types match the spa type */ 3201 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 3202 & ND_ARS_VOLATILE) == 0) 3203 return; 3204 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 3205 & ND_ARS_PERSISTENT) == 0) 3206 return; 3207 3208 nfit_spa->max_ars = ars_cap.max_ars_out; 3209 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 3210 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 3211 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 3212 } 3213 3214 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 3215 { 3216 struct nfit_spa *nfit_spa; 3217 int rc; 3218 3219 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3220 switch (nfit_spa_type(nfit_spa->spa)) { 3221 case NFIT_SPA_VOLATILE: 3222 case NFIT_SPA_PM: 3223 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3224 break; 3225 } 3226 } 3227 3228 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3229 switch (nfit_spa_type(nfit_spa->spa)) { 3230 case NFIT_SPA_VOLATILE: 3231 case NFIT_SPA_PM: 3232 /* register regions and kick off initial ARS run */ 3233 rc = ars_register(acpi_desc, nfit_spa); 3234 if (rc) 3235 return rc; 3236 break; 3237 case NFIT_SPA_BDW: 3238 /* nothing to register */ 3239 break; 3240 case NFIT_SPA_DCR: 3241 case NFIT_SPA_VDISK: 3242 case NFIT_SPA_VCD: 3243 case NFIT_SPA_PDISK: 3244 case NFIT_SPA_PCD: 3245 /* register known regions that don't support ARS */ 3246 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3247 if (rc) 3248 return rc; 3249 break; 3250 default: 3251 /* don't register unknown regions */ 3252 break; 3253 } 3254 3255 sched_ars(acpi_desc); 3256 return 0; 3257 } 3258 3259 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3260 struct nfit_table_prev *prev) 3261 { 3262 struct device *dev = acpi_desc->dev; 3263 3264 if (!list_empty(&prev->spas) || 3265 !list_empty(&prev->memdevs) || 3266 !list_empty(&prev->dcrs) || 3267 !list_empty(&prev->bdws) || 3268 !list_empty(&prev->idts) || 3269 !list_empty(&prev->flushes)) { 3270 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3271 return -ENXIO; 3272 } 3273 return 0; 3274 } 3275 3276 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3277 { 3278 struct device *dev = acpi_desc->dev; 3279 struct kernfs_node *nfit; 3280 struct device *bus_dev; 3281 3282 if (!ars_supported(acpi_desc->nvdimm_bus)) 3283 return 0; 3284 3285 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3286 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3287 if (!nfit) { 3288 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3289 return -ENODEV; 3290 } 3291 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3292 sysfs_put(nfit); 3293 if (!acpi_desc->scrub_count_state) { 3294 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3295 return -ENODEV; 3296 } 3297 3298 return 0; 3299 } 3300 3301 static void acpi_nfit_unregister(void *data) 3302 { 3303 struct acpi_nfit_desc *acpi_desc = data; 3304 3305 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3306 } 3307 3308 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3309 { 3310 struct device *dev = acpi_desc->dev; 3311 struct nfit_table_prev prev; 3312 const void *end; 3313 int rc; 3314 3315 if (!acpi_desc->nvdimm_bus) { 3316 acpi_nfit_init_dsms(acpi_desc); 3317 3318 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3319 &acpi_desc->nd_desc); 3320 if (!acpi_desc->nvdimm_bus) 3321 return -ENOMEM; 3322 3323 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3324 acpi_desc); 3325 if (rc) 3326 return rc; 3327 3328 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3329 if (rc) 3330 return rc; 3331 3332 /* register this acpi_desc for mce notifications */ 3333 mutex_lock(&acpi_desc_lock); 3334 list_add_tail(&acpi_desc->list, &acpi_descs); 3335 mutex_unlock(&acpi_desc_lock); 3336 } 3337 3338 mutex_lock(&acpi_desc->init_mutex); 3339 3340 INIT_LIST_HEAD(&prev.spas); 3341 INIT_LIST_HEAD(&prev.memdevs); 3342 INIT_LIST_HEAD(&prev.dcrs); 3343 INIT_LIST_HEAD(&prev.bdws); 3344 INIT_LIST_HEAD(&prev.idts); 3345 INIT_LIST_HEAD(&prev.flushes); 3346 3347 list_cut_position(&prev.spas, &acpi_desc->spas, 3348 acpi_desc->spas.prev); 3349 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3350 acpi_desc->memdevs.prev); 3351 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 3352 acpi_desc->dcrs.prev); 3353 list_cut_position(&prev.bdws, &acpi_desc->bdws, 3354 acpi_desc->bdws.prev); 3355 list_cut_position(&prev.idts, &acpi_desc->idts, 3356 acpi_desc->idts.prev); 3357 list_cut_position(&prev.flushes, &acpi_desc->flushes, 3358 acpi_desc->flushes.prev); 3359 3360 end = data + sz; 3361 while (!IS_ERR_OR_NULL(data)) 3362 data = add_table(acpi_desc, &prev, data, end); 3363 3364 if (IS_ERR(data)) { 3365 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data)); 3366 rc = PTR_ERR(data); 3367 goto out_unlock; 3368 } 3369 3370 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 3371 if (rc) 3372 goto out_unlock; 3373 3374 rc = nfit_mem_init(acpi_desc); 3375 if (rc) 3376 goto out_unlock; 3377 3378 rc = acpi_nfit_register_dimms(acpi_desc); 3379 if (rc) 3380 goto out_unlock; 3381 3382 rc = acpi_nfit_register_regions(acpi_desc); 3383 3384 out_unlock: 3385 mutex_unlock(&acpi_desc->init_mutex); 3386 return rc; 3387 } 3388 EXPORT_SYMBOL_GPL(acpi_nfit_init); 3389 3390 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3391 { 3392 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 3393 struct device *dev = acpi_desc->dev; 3394 3395 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3396 device_lock(dev); 3397 device_unlock(dev); 3398 3399 /* Bounce the init_mutex to complete initial registration */ 3400 mutex_lock(&acpi_desc->init_mutex); 3401 mutex_unlock(&acpi_desc->init_mutex); 3402 3403 return 0; 3404 } 3405 3406 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3407 struct nvdimm *nvdimm, unsigned int cmd) 3408 { 3409 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 3410 3411 if (nvdimm) 3412 return 0; 3413 if (cmd != ND_CMD_ARS_START) 3414 return 0; 3415 3416 /* 3417 * The kernel and userspace may race to initiate a scrub, but 3418 * the scrub thread is prepared to lose that initial race. It 3419 * just needs guarantees that any ARS it initiates are not 3420 * interrupted by any intervening start requests from userspace. 3421 */ 3422 if (work_busy(&acpi_desc->dwork.work)) 3423 return -EBUSY; 3424 3425 return 0; 3426 } 3427 3428 /* prevent security commands from being issued via ioctl */ 3429 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3430 struct nvdimm *nvdimm, unsigned int cmd, void *buf) 3431 { 3432 struct nd_cmd_pkg *call_pkg = buf; 3433 unsigned int func; 3434 3435 if (nvdimm && cmd == ND_CMD_CALL && 3436 call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { 3437 func = call_pkg->nd_command; 3438 if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) 3439 return -EOPNOTSUPP; 3440 } 3441 3442 return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd); 3443 } 3444 3445 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, 3446 enum nfit_ars_state req_type) 3447 { 3448 struct device *dev = acpi_desc->dev; 3449 int scheduled = 0, busy = 0; 3450 struct nfit_spa *nfit_spa; 3451 3452 mutex_lock(&acpi_desc->init_mutex); 3453 if (acpi_desc->cancel) { 3454 mutex_unlock(&acpi_desc->init_mutex); 3455 return 0; 3456 } 3457 3458 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3459 int type = nfit_spa_type(nfit_spa->spa); 3460 3461 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3462 continue; 3463 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3464 continue; 3465 3466 if (test_and_set_bit(req_type, &nfit_spa->ars_state)) 3467 busy++; 3468 else 3469 scheduled++; 3470 } 3471 if (scheduled) { 3472 sched_ars(acpi_desc); 3473 dev_dbg(dev, "ars_scan triggered\n"); 3474 } 3475 mutex_unlock(&acpi_desc->init_mutex); 3476 3477 if (scheduled) 3478 return 0; 3479 if (busy) 3480 return -EBUSY; 3481 return -ENOTTY; 3482 } 3483 3484 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3485 { 3486 struct nvdimm_bus_descriptor *nd_desc; 3487 3488 dev_set_drvdata(dev, acpi_desc); 3489 acpi_desc->dev = dev; 3490 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3491 nd_desc = &acpi_desc->nd_desc; 3492 nd_desc->provider_name = "ACPI.NFIT"; 3493 nd_desc->module = THIS_MODULE; 3494 nd_desc->ndctl = acpi_nfit_ctl; 3495 nd_desc->flush_probe = acpi_nfit_flush_probe; 3496 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3497 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3498 3499 INIT_LIST_HEAD(&acpi_desc->spas); 3500 INIT_LIST_HEAD(&acpi_desc->dcrs); 3501 INIT_LIST_HEAD(&acpi_desc->bdws); 3502 INIT_LIST_HEAD(&acpi_desc->idts); 3503 INIT_LIST_HEAD(&acpi_desc->flushes); 3504 INIT_LIST_HEAD(&acpi_desc->memdevs); 3505 INIT_LIST_HEAD(&acpi_desc->dimms); 3506 INIT_LIST_HEAD(&acpi_desc->list); 3507 mutex_init(&acpi_desc->init_mutex); 3508 acpi_desc->scrub_tmo = 1; 3509 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3510 } 3511 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3512 3513 static void acpi_nfit_put_table(void *table) 3514 { 3515 acpi_put_table(table); 3516 } 3517 3518 void acpi_nfit_shutdown(void *data) 3519 { 3520 struct acpi_nfit_desc *acpi_desc = data; 3521 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3522 3523 /* 3524 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3525 * race teardown 3526 */ 3527 mutex_lock(&acpi_desc_lock); 3528 list_del(&acpi_desc->list); 3529 mutex_unlock(&acpi_desc_lock); 3530 3531 mutex_lock(&acpi_desc->init_mutex); 3532 acpi_desc->cancel = 1; 3533 cancel_delayed_work_sync(&acpi_desc->dwork); 3534 mutex_unlock(&acpi_desc->init_mutex); 3535 3536 /* 3537 * Bounce the nvdimm bus lock to make sure any in-flight 3538 * acpi_nfit_ars_rescan() submissions have had a chance to 3539 * either submit or see ->cancel set. 3540 */ 3541 device_lock(bus_dev); 3542 device_unlock(bus_dev); 3543 3544 flush_workqueue(nfit_wq); 3545 } 3546 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3547 3548 static int acpi_nfit_add(struct acpi_device *adev) 3549 { 3550 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3551 struct acpi_nfit_desc *acpi_desc; 3552 struct device *dev = &adev->dev; 3553 struct acpi_table_header *tbl; 3554 acpi_status status = AE_OK; 3555 acpi_size sz; 3556 int rc = 0; 3557 3558 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3559 if (ACPI_FAILURE(status)) { 3560 /* The NVDIMM root device allows OS to trigger enumeration of 3561 * NVDIMMs through NFIT at boot time and re-enumeration at 3562 * root level via the _FIT method during runtime. 3563 * This is ok to return 0 here, we could have an nvdimm 3564 * hotplugged later and evaluate _FIT method which returns 3565 * data in the format of a series of NFIT Structures. 3566 */ 3567 dev_dbg(dev, "failed to find NFIT at startup\n"); 3568 return 0; 3569 } 3570 3571 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3572 if (rc) 3573 return rc; 3574 sz = tbl->length; 3575 3576 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3577 if (!acpi_desc) 3578 return -ENOMEM; 3579 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3580 3581 /* Save the acpi header for exporting the revision via sysfs */ 3582 acpi_desc->acpi_header = *tbl; 3583 3584 /* Evaluate _FIT and override with that if present */ 3585 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3586 if (ACPI_SUCCESS(status) && buf.length > 0) { 3587 union acpi_object *obj = buf.pointer; 3588 3589 if (obj->type == ACPI_TYPE_BUFFER) 3590 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3591 obj->buffer.length); 3592 else 3593 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3594 (int) obj->type); 3595 kfree(buf.pointer); 3596 } else 3597 /* skip over the lead-in header table */ 3598 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3599 + sizeof(struct acpi_table_nfit), 3600 sz - sizeof(struct acpi_table_nfit)); 3601 3602 if (rc) 3603 return rc; 3604 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3605 } 3606 3607 static int acpi_nfit_remove(struct acpi_device *adev) 3608 { 3609 /* see acpi_nfit_unregister */ 3610 return 0; 3611 } 3612 3613 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3614 { 3615 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3616 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3617 union acpi_object *obj; 3618 acpi_status status; 3619 int ret; 3620 3621 if (!dev->driver) { 3622 /* dev->driver may be null if we're being removed */ 3623 dev_dbg(dev, "no driver found for dev\n"); 3624 return; 3625 } 3626 3627 if (!acpi_desc) { 3628 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3629 if (!acpi_desc) 3630 return; 3631 acpi_nfit_desc_init(acpi_desc, dev); 3632 } else { 3633 /* 3634 * Finish previous registration before considering new 3635 * regions. 3636 */ 3637 flush_workqueue(nfit_wq); 3638 } 3639 3640 /* Evaluate _FIT */ 3641 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3642 if (ACPI_FAILURE(status)) { 3643 dev_err(dev, "failed to evaluate _FIT\n"); 3644 return; 3645 } 3646 3647 obj = buf.pointer; 3648 if (obj->type == ACPI_TYPE_BUFFER) { 3649 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3650 obj->buffer.length); 3651 if (ret) 3652 dev_err(dev, "failed to merge updated NFIT\n"); 3653 } else 3654 dev_err(dev, "Invalid _FIT\n"); 3655 kfree(buf.pointer); 3656 } 3657 3658 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3659 { 3660 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3661 3662 if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) 3663 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); 3664 else 3665 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT); 3666 } 3667 3668 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3669 { 3670 dev_dbg(dev, "event: 0x%x\n", event); 3671 3672 switch (event) { 3673 case NFIT_NOTIFY_UPDATE: 3674 return acpi_nfit_update_notify(dev, handle); 3675 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3676 return acpi_nfit_uc_error_notify(dev, handle); 3677 default: 3678 return; 3679 } 3680 } 3681 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3682 3683 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3684 { 3685 device_lock(&adev->dev); 3686 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3687 device_unlock(&adev->dev); 3688 } 3689 3690 static const struct acpi_device_id acpi_nfit_ids[] = { 3691 { "ACPI0012", 0 }, 3692 { "", 0 }, 3693 }; 3694 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3695 3696 static struct acpi_driver acpi_nfit_driver = { 3697 .name = KBUILD_MODNAME, 3698 .ids = acpi_nfit_ids, 3699 .ops = { 3700 .add = acpi_nfit_add, 3701 .remove = acpi_nfit_remove, 3702 .notify = acpi_nfit_notify, 3703 }, 3704 }; 3705 3706 static __init int nfit_init(void) 3707 { 3708 int ret; 3709 3710 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3711 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3712 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3713 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3714 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3715 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3716 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3717 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3718 3719 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3720 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3721 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3722 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3723 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3724 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3725 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3726 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3727 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3728 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3729 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3730 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3731 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3732 3733 nfit_wq = create_singlethread_workqueue("nfit"); 3734 if (!nfit_wq) 3735 return -ENOMEM; 3736 3737 nfit_mce_register(); 3738 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3739 if (ret) { 3740 nfit_mce_unregister(); 3741 destroy_workqueue(nfit_wq); 3742 } 3743 3744 return ret; 3745 3746 } 3747 3748 static __exit void nfit_exit(void) 3749 { 3750 nfit_mce_unregister(); 3751 acpi_bus_unregister_driver(&acpi_nfit_driver); 3752 destroy_workqueue(nfit_wq); 3753 WARN_ON(!list_empty(&acpi_descs)); 3754 } 3755 3756 module_init(nfit_init); 3757 module_exit(nfit_exit); 3758 MODULE_LICENSE("GPL v2"); 3759 MODULE_AUTHOR("Intel Corporation"); 3760