/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (nfit_mem->has_lsr)
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
	}

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
			dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& nfit_mem->has_lsw) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				*flags = memdev->flags;
				return memdev->physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;


	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		mutex_lock(&acpi_desc->init_mutex);
		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				work_busy(&acpi_desc->dwork.work)
				&& !acpi_desc->cancel ? "+\n" : "\n");
		mutex_unlock(&acpi_desc->init_mutex);
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, 0);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
"smart_notify " : ""); 1555 } 1556 static DEVICE_ATTR_RO(flags); 1557 1558 static ssize_t id_show(struct device *dev, 1559 struct device_attribute *attr, char *buf) 1560 { 1561 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1562 1563 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) 1564 return sprintf(buf, "%04x-%02x-%04x-%08x\n", 1565 be16_to_cpu(dcr->vendor_id), 1566 dcr->manufacturing_location, 1567 be16_to_cpu(dcr->manufacturing_date), 1568 be32_to_cpu(dcr->serial_number)); 1569 else 1570 return sprintf(buf, "%04x-%08x\n", 1571 be16_to_cpu(dcr->vendor_id), 1572 be32_to_cpu(dcr->serial_number)); 1573 } 1574 static DEVICE_ATTR_RO(id); 1575 1576 static struct attribute *acpi_nfit_dimm_attributes[] = { 1577 &dev_attr_handle.attr, 1578 &dev_attr_phys_id.attr, 1579 &dev_attr_vendor.attr, 1580 &dev_attr_device.attr, 1581 &dev_attr_rev_id.attr, 1582 &dev_attr_subsystem_vendor.attr, 1583 &dev_attr_subsystem_device.attr, 1584 &dev_attr_subsystem_rev_id.attr, 1585 &dev_attr_format.attr, 1586 &dev_attr_formats.attr, 1587 &dev_attr_format1.attr, 1588 &dev_attr_serial.attr, 1589 &dev_attr_flags.attr, 1590 &dev_attr_id.attr, 1591 &dev_attr_family.attr, 1592 &dev_attr_dsm_mask.attr, 1593 NULL, 1594 }; 1595 1596 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 1597 struct attribute *a, int n) 1598 { 1599 struct device *dev = container_of(kobj, struct device, kobj); 1600 struct nvdimm *nvdimm = to_nvdimm(dev); 1601 1602 if (!to_nfit_dcr(dev)) { 1603 /* Without a dcr only the memdev attributes can be surfaced */ 1604 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr 1605 || a == &dev_attr_flags.attr 1606 || a == &dev_attr_family.attr 1607 || a == &dev_attr_dsm_mask.attr) 1608 return a->mode; 1609 return 0; 1610 } 1611 1612 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) 1613 return 0; 1614 return a->mode; 1615 } 1616 1617 static const struct attribute_group acpi_nfit_dimm_attribute_group = { 1618 .name = "nfit", 1619 .attrs = acpi_nfit_dimm_attributes, 1620 .is_visible = acpi_nfit_dimm_attr_visible, 1621 }; 1622 1623 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 1624 &nvdimm_attribute_group, 1625 &nd_device_attribute_group, 1626 &acpi_nfit_dimm_attribute_group, 1627 NULL, 1628 }; 1629 1630 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 1631 u32 device_handle) 1632 { 1633 struct nfit_mem *nfit_mem; 1634 1635 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1636 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 1637 return nfit_mem->nvdimm; 1638 1639 return NULL; 1640 } 1641 1642 void __acpi_nvdimm_notify(struct device *dev, u32 event) 1643 { 1644 struct nfit_mem *nfit_mem; 1645 struct acpi_nfit_desc *acpi_desc; 1646 1647 dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev), 1648 event); 1649 1650 if (event != NFIT_NOTIFY_DIMM_HEALTH) { 1651 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), 1652 event); 1653 return; 1654 } 1655 1656 acpi_desc = dev_get_drvdata(dev->parent); 1657 if (!acpi_desc) 1658 return; 1659 1660 /* 1661 * If we successfully retrieved acpi_desc, then we know nfit_mem data 1662 * is still valid. 
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
{
	acpi_handle handle;
	acpi_status status;

	status = acpi_get_handle(adev->handle, method, &handle);

	if (ACPI_SUCCESS(status))
		return true;
	return false;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const guid_t *guid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
				ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}
	/*
	 * Record nfit_mem for the notification path to track back to
	 * the nfit sysfs attributes for this dimm device object.
	 */
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets. Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this GUID.
	 */
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = NVDIMM_INTEL_CMDMASK;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid,
					nfit_dsm_revid(nfit_mem->family, i),
					1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
		nfit_mem->has_lsr = true;
	}

	if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
		nfit_mem->has_lsw = true;
	}

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm) {
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
			dev_set_drvdata(&adev_dimm->dev, NULL);
		}
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
			/*
			 * These commands have a 1:1 correspondence
			 * between DSM payload and libnvdimm ioctl
			 * payload format.
			 */
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}

		if (nfit_mem->has_lsr) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		}
		if (nfit_mem->has_lsw)
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		if (!nvdimm)
			continue;

		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
1930 */ 1931 enum nfit_aux_cmds { 1932 NFIT_CMD_TRANSLATE_SPA = 5, 1933 NFIT_CMD_ARS_INJECT_SET = 7, 1934 NFIT_CMD_ARS_INJECT_CLEAR = 8, 1935 NFIT_CMD_ARS_INJECT_GET = 9, 1936 }; 1937 1938 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 1939 { 1940 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1941 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); 1942 struct acpi_device *adev; 1943 unsigned long dsm_mask; 1944 int i; 1945 1946 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 1947 nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; 1948 adev = to_acpi_dev(acpi_desc); 1949 if (!adev) 1950 return; 1951 1952 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 1953 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1954 set_bit(i, &nd_desc->cmd_mask); 1955 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); 1956 1957 dsm_mask = 1958 (1 << ND_CMD_ARS_CAP) | 1959 (1 << ND_CMD_ARS_START) | 1960 (1 << ND_CMD_ARS_STATUS) | 1961 (1 << ND_CMD_CLEAR_ERROR) | 1962 (1 << NFIT_CMD_TRANSLATE_SPA) | 1963 (1 << NFIT_CMD_ARS_INJECT_SET) | 1964 (1 << NFIT_CMD_ARS_INJECT_CLEAR) | 1965 (1 << NFIT_CMD_ARS_INJECT_GET); 1966 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1967 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1968 set_bit(i, &nd_desc->bus_dsm_mask); 1969 } 1970 1971 static ssize_t range_index_show(struct device *dev, 1972 struct device_attribute *attr, char *buf) 1973 { 1974 struct nd_region *nd_region = to_nd_region(dev); 1975 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1976 1977 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 1978 } 1979 static DEVICE_ATTR_RO(range_index); 1980 1981 static struct attribute *acpi_nfit_region_attributes[] = { 1982 &dev_attr_range_index.attr, 1983 NULL, 1984 }; 1985 1986 static const struct attribute_group acpi_nfit_region_attribute_group = { 1987 .name = "nfit", 1988 .attrs = acpi_nfit_region_attributes, 1989 }; 1990 1991 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 1992 &nd_region_attribute_group, 1993 &nd_mapping_attribute_group, 1994 &nd_device_attribute_group, 1995 &nd_numa_attribute_group, 1996 &acpi_nfit_region_attribute_group, 1997 NULL, 1998 }; 1999 2000 /* enough info to uniquely specify an interleave set */ 2001 struct nfit_set_info { 2002 struct nfit_set_info_map { 2003 u64 region_offset; 2004 u32 serial_number; 2005 u32 pad; 2006 } mapping[0]; 2007 }; 2008 2009 struct nfit_set_info2 { 2010 struct nfit_set_info_map2 { 2011 u64 region_offset; 2012 u32 serial_number; 2013 u16 vendor_id; 2014 u16 manufacturing_date; 2015 u8 manufacturing_location; 2016 u8 reserved[31]; 2017 } mapping[0]; 2018 }; 2019 2020 static size_t sizeof_nfit_set_info(int num_mappings) 2021 { 2022 return sizeof(struct nfit_set_info) 2023 + num_mappings * sizeof(struct nfit_set_info_map); 2024 } 2025 2026 static size_t sizeof_nfit_set_info2(int num_mappings) 2027 { 2028 return sizeof(struct nfit_set_info2) 2029 + num_mappings * sizeof(struct nfit_set_info_map2); 2030 } 2031 2032 static int cmp_map_compat(const void *m0, const void *m1) 2033 { 2034 const struct nfit_set_info_map *map0 = m0; 2035 const struct nfit_set_info_map *map1 = m1; 2036 2037 return memcmp(&map0->region_offset, &map1->region_offset, 2038 sizeof(u64)); 2039 } 2040 2041 static int cmp_map(const void *m0, const void *m1) 2042 { 2043 const struct nfit_set_info_map *map0 = m0; 2044 const struct nfit_set_info_map *map1 = m1; 2045 2046 if (map0->region_offset < map1->region_offset) 2047 return -1; 2048 else if 
(map0->region_offset > map1->region_offset) 2049 return 1; 2050 return 0; 2051 } 2052 2053 static int cmp_map2(const void *m0, const void *m1) 2054 { 2055 const struct nfit_set_info_map2 *map0 = m0; 2056 const struct nfit_set_info_map2 *map1 = m1; 2057 2058 if (map0->region_offset < map1->region_offset) 2059 return -1; 2060 else if (map0->region_offset > map1->region_offset) 2061 return 1; 2062 return 0; 2063 } 2064 2065 /* Retrieve the nth entry referencing this spa */ 2066 static struct acpi_nfit_memory_map *memdev_from_spa( 2067 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2068 { 2069 struct nfit_memdev *nfit_memdev; 2070 2071 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2072 if (nfit_memdev->memdev->range_index == range_index) 2073 if (n-- == 0) 2074 return nfit_memdev->memdev; 2075 return NULL; 2076 } 2077 2078 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2079 struct nd_region_desc *ndr_desc, 2080 struct acpi_nfit_system_address *spa) 2081 { 2082 struct device *dev = acpi_desc->dev; 2083 struct nd_interleave_set *nd_set; 2084 u16 nr = ndr_desc->num_mappings; 2085 struct nfit_set_info2 *info2; 2086 struct nfit_set_info *info; 2087 int i; 2088 2089 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2090 if (!nd_set) 2091 return -ENOMEM; 2092 ndr_desc->nd_set = nd_set; 2093 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2094 2095 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2096 if (!info) 2097 return -ENOMEM; 2098 2099 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); 2100 if (!info2) 2101 return -ENOMEM; 2102 2103 for (i = 0; i < nr; i++) { 2104 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2105 struct nfit_set_info_map *map = &info->mapping[i]; 2106 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2107 struct nvdimm *nvdimm = mapping->nvdimm; 2108 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2109 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 2110 spa->range_index, i); 2111 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2112 2113 if (!memdev || !nfit_mem->dcr) { 2114 dev_err(dev, "%s: failed to find DCR\n", __func__); 2115 return -ENODEV; 2116 } 2117 2118 map->region_offset = memdev->region_offset; 2119 map->serial_number = dcr->serial_number; 2120 2121 map2->region_offset = memdev->region_offset; 2122 map2->serial_number = dcr->serial_number; 2123 map2->vendor_id = dcr->vendor_id; 2124 map2->manufacturing_date = dcr->manufacturing_date; 2125 map2->manufacturing_location = dcr->manufacturing_location; 2126 } 2127 2128 /* v1.1 namespaces */ 2129 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2130 cmp_map, NULL); 2131 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2132 2133 /* v1.2 namespaces */ 2134 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), 2135 cmp_map2, NULL); 2136 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); 2137 2138 /* support v1.1 namespaces created with the wrong sort order */ 2139 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2140 cmp_map_compat, NULL); 2141 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2142 2143 /* record the result of the sort for the mapping position */ 2144 for (i = 0; i < nr; i++) { 2145 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2146 int j; 2147 2148 for (j = 0; j < nr; j++) { 2149 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2150 struct nvdimm 
*nvdimm = mapping->nvdimm; 2151 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2152 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2153 2154 if (map2->serial_number == dcr->serial_number && 2155 map2->vendor_id == dcr->vendor_id && 2156 map2->manufacturing_date == dcr->manufacturing_date && 2157 map2->manufacturing_location 2158 == dcr->manufacturing_location) { 2159 mapping->position = i; 2160 break; 2161 } 2162 } 2163 } 2164 2165 ndr_desc->nd_set = nd_set; 2166 devm_kfree(dev, info); 2167 devm_kfree(dev, info2); 2168 2169 return 0; 2170 } 2171 2172 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2173 { 2174 struct acpi_nfit_interleave *idt = mmio->idt; 2175 u32 sub_line_offset, line_index, line_offset; 2176 u64 line_no, table_skip_count, table_offset; 2177 2178 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2179 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2180 line_offset = idt->line_offset[line_index] 2181 * mmio->line_size; 2182 table_offset = table_skip_count * mmio->table_size; 2183 2184 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2185 } 2186 2187 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2188 { 2189 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2190 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2191 const u32 STATUS_MASK = 0x80000037; 2192 2193 if (mmio->num_lines) 2194 offset = to_interleave_offset(offset, mmio); 2195 2196 return readl(mmio->addr.base + offset) & STATUS_MASK; 2197 } 2198 2199 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2200 resource_size_t dpa, unsigned int len, unsigned int write) 2201 { 2202 u64 cmd, offset; 2203 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2204 2205 enum { 2206 BCW_OFFSET_MASK = (1ULL << 48)-1, 2207 BCW_LEN_SHIFT = 48, 2208 BCW_LEN_MASK = (1ULL << 8) - 1, 2209 BCW_CMD_SHIFT = 56, 2210 }; 2211 2212 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2213 len = len >> L1_CACHE_SHIFT; 2214 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2215 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2216 2217 offset = nfit_blk->cmd_offset + mmio->size * bw; 2218 if (mmio->num_lines) 2219 offset = to_interleave_offset(offset, mmio); 2220 2221 writeq(cmd, mmio->addr.base + offset); 2222 nvdimm_flush(nfit_blk->nd_region); 2223 2224 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2225 readq(mmio->addr.base + offset); 2226 } 2227 2228 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2229 resource_size_t dpa, void *iobuf, size_t len, int rw, 2230 unsigned int lane) 2231 { 2232 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2233 unsigned int copied = 0; 2234 u64 base_offset; 2235 int rc; 2236 2237 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2238 + lane * mmio->size; 2239 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2240 while (len) { 2241 unsigned int c; 2242 u64 offset; 2243 2244 if (mmio->num_lines) { 2245 u32 line_offset; 2246 2247 offset = to_interleave_offset(base_offset + copied, 2248 mmio); 2249 div_u64_rem(offset, mmio->line_size, &line_offset); 2250 c = min_t(size_t, len, mmio->line_size - line_offset); 2251 } else { 2252 offset = base_offset + nfit_blk->bdw_offset; 2253 c = len; 2254 } 2255 2256 if (rw) 2257 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2258 else { 2259 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2260 arch_invalidate_pmem((void __force *) 2261 mmio->addr.aperture + offset, c); 2262 2263 memcpy(iobuf + 
copied, mmio->addr.aperture + offset, c); 2264 } 2265 2266 copied += c; 2267 len -= c; 2268 } 2269 2270 if (rw) 2271 nvdimm_flush(nfit_blk->nd_region); 2272 2273 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2274 return rc; 2275 } 2276 2277 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2278 resource_size_t dpa, void *iobuf, u64 len, int rw) 2279 { 2280 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2281 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2282 struct nd_region *nd_region = nfit_blk->nd_region; 2283 unsigned int lane, copied = 0; 2284 int rc = 0; 2285 2286 lane = nd_region_acquire_lane(nd_region); 2287 while (len) { 2288 u64 c = min(len, mmio->size); 2289 2290 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2291 iobuf + copied, c, rw, lane); 2292 if (rc) 2293 break; 2294 2295 copied += c; 2296 len -= c; 2297 } 2298 nd_region_release_lane(nd_region, lane); 2299 2300 return rc; 2301 } 2302 2303 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2304 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2305 { 2306 if (idt) { 2307 mmio->num_lines = idt->line_count; 2308 mmio->line_size = idt->line_size; 2309 if (interleave_ways == 0) 2310 return -ENXIO; 2311 mmio->table_size = mmio->num_lines * interleave_ways 2312 * mmio->line_size; 2313 } 2314 2315 return 0; 2316 } 2317 2318 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2319 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2320 { 2321 struct nd_cmd_dimm_flags flags; 2322 int rc; 2323 2324 memset(&flags, 0, sizeof(flags)); 2325 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2326 sizeof(flags), NULL); 2327 2328 if (rc >= 0 && flags.status == 0) 2329 nfit_blk->dimm_flags = flags.flags; 2330 else if (rc == -ENOTTY) { 2331 /* fall back to a conservative default */ 2332 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2333 rc = 0; 2334 } else 2335 rc = -ENXIO; 2336 2337 return rc; 2338 } 2339 2340 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2341 struct device *dev) 2342 { 2343 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2344 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2345 struct nfit_blk_mmio *mmio; 2346 struct nfit_blk *nfit_blk; 2347 struct nfit_mem *nfit_mem; 2348 struct nvdimm *nvdimm; 2349 int rc; 2350 2351 nvdimm = nd_blk_region_to_dimm(ndbr); 2352 nfit_mem = nvdimm_provider_data(nvdimm); 2353 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2354 dev_dbg(dev, "missing%s%s%s\n", 2355 nfit_mem ? "" : " nfit_mem", 2356 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2357 (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); 2358 return -ENXIO; 2359 } 2360 2361 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2362 if (!nfit_blk) 2363 return -ENOMEM; 2364 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2365 nfit_blk->nd_region = to_nd_region(dev); 2366 2367 /* map block aperture memory */ 2368 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2369 mmio = &nfit_blk->mmio[BDW]; 2370 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2371 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2372 if (!mmio->addr.base) { 2373 dev_dbg(dev, "%s failed to map bdw\n", 2374 nvdimm_name(nvdimm)); 2375 return -ENOMEM; 2376 } 2377 mmio->size = nfit_mem->bdw->size; 2378 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2379 mmio->idt = nfit_mem->idt_bdw; 2380 mmio->spa = nfit_mem->spa_bdw; 2381 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2382 nfit_mem->memdev_bdw->interleave_ways); 2383 if (rc) { 2384 dev_dbg(dev, "%s failed to init bdw interleave\n", 2385 nvdimm_name(nvdimm)); 2386 return rc; 2387 } 2388 2389 /* map block control memory */ 2390 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2391 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2392 mmio = &nfit_blk->mmio[DCR]; 2393 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2394 nfit_mem->spa_dcr->length); 2395 if (!mmio->addr.base) { 2396 dev_dbg(dev, "%s failed to map dcr\n", 2397 nvdimm_name(nvdimm)); 2398 return -ENOMEM; 2399 } 2400 mmio->size = nfit_mem->dcr->window_size; 2401 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2402 mmio->idt = nfit_mem->idt_dcr; 2403 mmio->spa = nfit_mem->spa_dcr; 2404 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2405 nfit_mem->memdev_dcr->interleave_ways); 2406 if (rc) { 2407 dev_dbg(dev, "%s failed to init dcr interleave\n", 2408 nvdimm_name(nvdimm)); 2409 return rc; 2410 } 2411 2412 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2413 if (rc < 0) { 2414 dev_dbg(dev, "%s failed get DIMM flags\n", 2415 nvdimm_name(nvdimm)); 2416 return rc; 2417 } 2418 2419 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2420 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2421 2422 if (mmio->line_size == 0) 2423 return 0; 2424 2425 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2426 + 8 > mmio->line_size) { 2427 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2428 return -ENXIO; 2429 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2430 + 8 > mmio->line_size) { 2431 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2432 return -ENXIO; 2433 } 2434 2435 return 0; 2436 } 2437 2438 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2439 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2440 { 2441 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2442 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2443 int cmd_rc, rc; 2444 2445 cmd->address = spa->address; 2446 cmd->length = spa->length; 2447 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2448 sizeof(*cmd), &cmd_rc); 2449 if (rc < 0) 2450 return rc; 2451 return cmd_rc; 2452 } 2453 2454 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) 2455 { 2456 int rc; 2457 int cmd_rc; 2458 struct nd_cmd_ars_start ars_start; 2459 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2460 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2461 2462 memset(&ars_start, 0, sizeof(ars_start)); 2463 ars_start.address = spa->address; 2464 ars_start.length = spa->length; 2465 if 
(test_bit(ARS_SHORT, &nfit_spa->ars_state)) 2466 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2467 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2468 ars_start.type = ND_ARS_PERSISTENT; 2469 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2470 ars_start.type = ND_ARS_VOLATILE; 2471 else 2472 return -ENOTTY; 2473 2474 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2475 sizeof(ars_start), &cmd_rc); 2476 2477 if (rc < 0) 2478 return rc; 2479 return cmd_rc; 2480 } 2481 2482 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2483 { 2484 int rc, cmd_rc; 2485 struct nd_cmd_ars_start ars_start; 2486 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2487 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2488 2489 memset(&ars_start, 0, sizeof(ars_start)); 2490 ars_start.address = ars_status->restart_address; 2491 ars_start.length = ars_status->restart_length; 2492 ars_start.type = ars_status->type; 2493 ars_start.flags = acpi_desc->ars_start_flags; 2494 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2495 sizeof(ars_start), &cmd_rc); 2496 if (rc < 0) 2497 return rc; 2498 return cmd_rc; 2499 } 2500 2501 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2502 { 2503 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2504 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2505 int rc, cmd_rc; 2506 2507 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2508 acpi_desc->max_ars, &cmd_rc); 2509 if (rc < 0) 2510 return rc; 2511 return cmd_rc; 2512 } 2513 2514 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2515 struct nfit_spa *nfit_spa) 2516 { 2517 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2518 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2519 struct nd_region *nd_region = nfit_spa->nd_region; 2520 struct device *dev; 2521 2522 if ((ars_status->address >= spa->address && ars_status->address 2523 < spa->address + spa->length) 2524 || (ars_status->address < spa->address)) { 2525 /* 2526 * Assume that if a scrub starts at an offset from the 2527 * start of nfit_spa that we are in the continuation 2528 * case. 2529 * 2530 * Otherwise, if the scrub covers the spa range, mark 2531 * any pending request complete. 2532 */ 2533 if (ars_status->address + ars_status->length 2534 >= spa->address + spa->length) 2535 /* complete */; 2536 else 2537 return; 2538 } else 2539 return; 2540 2541 if (test_bit(ARS_DONE, &nfit_spa->ars_state)) 2542 return; 2543 2544 if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state)) 2545 return; 2546 2547 if (nd_region) { 2548 dev = nd_region_dev(nd_region); 2549 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2550 } else 2551 dev = acpi_desc->dev; 2552 2553 dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index, 2554 test_bit(ARS_SHORT, &nfit_spa->ars_state) 2555 ? "short" : "long"); 2556 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2557 set_bit(ARS_DONE, &nfit_spa->ars_state); 2558 } 2559 2560 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2561 { 2562 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2563 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2564 int rc; 2565 u32 i; 2566 2567 /* 2568 * First record starts at 44 byte offset from the start of the 2569 * payload. 
2570 */ 2571 if (ars_status->out_length < 44) 2572 return 0; 2573 for (i = 0; i < ars_status->num_records; i++) { 2574 /* only process full records */ 2575 if (ars_status->out_length 2576 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2577 break; 2578 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2579 ars_status->records[i].err_address, 2580 ars_status->records[i].length); 2581 if (rc) 2582 return rc; 2583 } 2584 if (i < ars_status->num_records) 2585 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2586 2587 return 0; 2588 } 2589 2590 static void acpi_nfit_remove_resource(void *data) 2591 { 2592 struct resource *res = data; 2593 2594 remove_resource(res); 2595 } 2596 2597 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2598 struct nd_region_desc *ndr_desc) 2599 { 2600 struct resource *res, *nd_res = ndr_desc->res; 2601 int is_pmem, ret; 2602 2603 /* No operation if the region is already registered as PMEM */ 2604 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2605 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2606 if (is_pmem == REGION_INTERSECTS) 2607 return 0; 2608 2609 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2610 if (!res) 2611 return -ENOMEM; 2612 2613 res->name = "Persistent Memory"; 2614 res->start = nd_res->start; 2615 res->end = nd_res->end; 2616 res->flags = IORESOURCE_MEM; 2617 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2618 2619 ret = insert_resource(&iomem_resource, res); 2620 if (ret) 2621 return ret; 2622 2623 ret = devm_add_action_or_reset(acpi_desc->dev, 2624 acpi_nfit_remove_resource, 2625 res); 2626 if (ret) 2627 return ret; 2628 2629 return 0; 2630 } 2631 2632 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2633 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2634 struct acpi_nfit_memory_map *memdev, 2635 struct nfit_spa *nfit_spa) 2636 { 2637 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2638 memdev->device_handle); 2639 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2640 struct nd_blk_region_desc *ndbr_desc; 2641 struct nfit_mem *nfit_mem; 2642 int rc; 2643 2644 if (!nvdimm) { 2645 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2646 spa->range_index, memdev->device_handle); 2647 return -ENODEV; 2648 } 2649 2650 mapping->nvdimm = nvdimm; 2651 switch (nfit_spa_type(spa)) { 2652 case NFIT_SPA_PM: 2653 case NFIT_SPA_VOLATILE: 2654 mapping->start = memdev->address; 2655 mapping->size = memdev->region_size; 2656 break; 2657 case NFIT_SPA_DCR: 2658 nfit_mem = nvdimm_provider_data(nvdimm); 2659 if (!nfit_mem || !nfit_mem->bdw) { 2660 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2661 spa->range_index, nvdimm_name(nvdimm)); 2662 break; 2663 } 2664 2665 mapping->size = nfit_mem->bdw->capacity; 2666 mapping->start = nfit_mem->bdw->start_address; 2667 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2668 ndr_desc->mapping = mapping; 2669 ndr_desc->num_mappings = 1; 2670 ndbr_desc = to_blk_region_desc(ndr_desc); 2671 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2672 ndbr_desc->do_io = acpi_desc->blk_do_io; 2673 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2674 if (rc) 2675 return rc; 2676 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2677 ndr_desc); 2678 if (!nfit_spa->nd_region) 2679 return -ENOMEM; 2680 break; 2681 } 2682 2683 return 0; 2684 } 2685 2686 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2687 { 2688 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2689 
nfit_spa_type(spa) == NFIT_SPA_VCD || 2690 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2691 nfit_spa_type(spa) == NFIT_SPA_PCD); 2692 } 2693 2694 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2695 { 2696 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2697 nfit_spa_type(spa) == NFIT_SPA_VCD || 2698 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2699 } 2700 2701 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2702 struct nfit_spa *nfit_spa) 2703 { 2704 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2705 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2706 struct nd_blk_region_desc ndbr_desc; 2707 struct nd_region_desc *ndr_desc; 2708 struct nfit_memdev *nfit_memdev; 2709 struct nvdimm_bus *nvdimm_bus; 2710 struct resource res; 2711 int count = 0, rc; 2712 2713 if (nfit_spa->nd_region) 2714 return 0; 2715 2716 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2717 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2718 return 0; 2719 } 2720 2721 memset(&res, 0, sizeof(res)); 2722 memset(&mappings, 0, sizeof(mappings)); 2723 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2724 res.start = spa->address; 2725 res.end = res.start + spa->length - 1; 2726 ndr_desc = &ndbr_desc.ndr_desc; 2727 ndr_desc->res = &res; 2728 ndr_desc->provider_data = nfit_spa; 2729 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2730 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2731 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2732 spa->proximity_domain); 2733 else 2734 ndr_desc->numa_node = NUMA_NO_NODE; 2735 2736 /* 2737 * Persistence domain bits are hierarchical, if 2738 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 2739 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 2740 */ 2741 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2742 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2743 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 2744 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2745 2746 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2747 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2748 struct nd_mapping_desc *mapping; 2749 2750 if (memdev->range_index != spa->range_index) 2751 continue; 2752 if (count >= ND_MAX_MAPPINGS) { 2753 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2754 spa->range_index, ND_MAX_MAPPINGS); 2755 return -ENXIO; 2756 } 2757 mapping = &mappings[count++]; 2758 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2759 memdev, nfit_spa); 2760 if (rc) 2761 goto out; 2762 } 2763 2764 ndr_desc->mapping = mappings; 2765 ndr_desc->num_mappings = count; 2766 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2767 if (rc) 2768 goto out; 2769 2770 nvdimm_bus = acpi_desc->nvdimm_bus; 2771 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2772 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2773 if (rc) { 2774 dev_warn(acpi_desc->dev, 2775 "failed to insert pmem resource to iomem: %d\n", 2776 rc); 2777 goto out; 2778 } 2779 2780 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2781 ndr_desc); 2782 if (!nfit_spa->nd_region) 2783 rc = -ENOMEM; 2784 } else if (nfit_spa_is_volatile(spa)) { 2785 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2786 ndr_desc); 2787 if (!nfit_spa->nd_region) 2788 rc = -ENOMEM; 2789 } else if (nfit_spa_is_virtual(spa)) { 2790 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2791 ndr_desc); 2792 if (!nfit_spa->nd_region) 2793 rc = -ENOMEM; 2794 } 2795 2796 out: 2797 if 
(rc) 2798 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2799 nfit_spa->spa->range_index); 2800 return rc; 2801 } 2802 2803 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 2804 { 2805 struct device *dev = acpi_desc->dev; 2806 struct nd_cmd_ars_status *ars_status; 2807 2808 if (acpi_desc->ars_status) { 2809 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2810 return 0; 2811 } 2812 2813 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 2814 if (!ars_status) 2815 return -ENOMEM; 2816 acpi_desc->ars_status = ars_status; 2817 return 0; 2818 } 2819 2820 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 2821 { 2822 int rc; 2823 2824 if (ars_status_alloc(acpi_desc)) 2825 return -ENOMEM; 2826 2827 rc = ars_get_status(acpi_desc); 2828 2829 if (rc < 0 && rc != -ENOSPC) 2830 return rc; 2831 2832 if (ars_status_process_records(acpi_desc)) 2833 return -ENOMEM; 2834 2835 return 0; 2836 } 2837 2838 static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, 2839 int *query_rc) 2840 { 2841 int rc = *query_rc; 2842 2843 if (no_init_ars) 2844 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2845 2846 set_bit(ARS_REQ, &nfit_spa->ars_state); 2847 set_bit(ARS_SHORT, &nfit_spa->ars_state); 2848 2849 switch (rc) { 2850 case 0: 2851 case -EAGAIN: 2852 rc = ars_start(acpi_desc, nfit_spa); 2853 if (rc == -EBUSY) { 2854 *query_rc = rc; 2855 break; 2856 } else if (rc == 0) { 2857 rc = acpi_nfit_query_poison(acpi_desc); 2858 } else { 2859 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2860 break; 2861 } 2862 if (rc == -EAGAIN) 2863 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2864 else if (rc == 0) 2865 ars_complete(acpi_desc, nfit_spa); 2866 break; 2867 case -EBUSY: 2868 case -ENOSPC: 2869 break; 2870 default: 2871 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2872 break; 2873 } 2874 2875 if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state)) 2876 set_bit(ARS_REQ, &nfit_spa->ars_state); 2877 2878 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2879 } 2880 2881 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 2882 { 2883 struct nfit_spa *nfit_spa; 2884 2885 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2886 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2887 continue; 2888 ars_complete(acpi_desc, nfit_spa); 2889 } 2890 } 2891 2892 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 2893 int query_rc) 2894 { 2895 unsigned int tmo = acpi_desc->scrub_tmo; 2896 struct device *dev = acpi_desc->dev; 2897 struct nfit_spa *nfit_spa; 2898 2899 if (acpi_desc->cancel) 2900 return 0; 2901 2902 if (query_rc == -EBUSY) { 2903 dev_dbg(dev, "ARS: ARS busy\n"); 2904 return min(30U * 60U, tmo * 2); 2905 } 2906 if (query_rc == -ENOSPC) { 2907 dev_dbg(dev, "ARS: ARS continue\n"); 2908 ars_continue(acpi_desc); 2909 return 1; 2910 } 2911 if (query_rc && query_rc != -EAGAIN) { 2912 unsigned long long addr, end; 2913 2914 addr = acpi_desc->ars_status->address; 2915 end = addr + acpi_desc->ars_status->length; 2916 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 2917 query_rc); 2918 } 2919 2920 ars_complete_all(acpi_desc); 2921 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2922 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2923 continue; 2924 if (test_bit(ARS_REQ, &nfit_spa->ars_state)) { 2925 int rc = ars_start(acpi_desc, nfit_spa); 2926 2927 clear_bit(ARS_DONE, &nfit_spa->ars_state); 2928 dev = nd_region_dev(nfit_spa->nd_region); 2929 dev_dbg(dev, "ARS: range %d ARS start (%d)\n", 
2930 nfit_spa->spa->range_index, rc); 2931 if (rc == 0 || rc == -EBUSY) 2932 return 1; 2933 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 2934 nfit_spa->spa->range_index, rc); 2935 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2936 } 2937 } 2938 return 0; 2939 } 2940 2941 static void acpi_nfit_scrub(struct work_struct *work) 2942 { 2943 struct acpi_nfit_desc *acpi_desc; 2944 unsigned int tmo; 2945 int query_rc; 2946 2947 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 2948 mutex_lock(&acpi_desc->init_mutex); 2949 query_rc = acpi_nfit_query_poison(acpi_desc); 2950 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 2951 if (tmo) { 2952 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 2953 acpi_desc->scrub_tmo = tmo; 2954 } else { 2955 acpi_desc->scrub_count++; 2956 if (acpi_desc->scrub_count_state) 2957 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2958 } 2959 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2960 mutex_unlock(&acpi_desc->init_mutex); 2961 } 2962 2963 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 2964 struct nfit_spa *nfit_spa) 2965 { 2966 int type = nfit_spa_type(nfit_spa->spa); 2967 struct nd_cmd_ars_cap ars_cap; 2968 int rc; 2969 2970 memset(&ars_cap, 0, sizeof(ars_cap)); 2971 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 2972 if (rc < 0) 2973 return; 2974 /* check that the supported scrub types match the spa type */ 2975 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 2976 & ND_ARS_VOLATILE) == 0) 2977 return; 2978 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 2979 & ND_ARS_PERSISTENT) == 0) 2980 return; 2981 2982 nfit_spa->max_ars = ars_cap.max_ars_out; 2983 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 2984 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 2985 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 2986 set_bit(ARS_REQ, &nfit_spa->ars_state); 2987 } 2988 2989 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 2990 { 2991 struct nfit_spa *nfit_spa; 2992 int rc, query_rc; 2993 2994 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2995 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2996 switch (nfit_spa_type(nfit_spa->spa)) { 2997 case NFIT_SPA_VOLATILE: 2998 case NFIT_SPA_PM: 2999 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3000 break; 3001 } 3002 } 3003 3004 /* 3005 * Reap any results that might be pending before starting new 3006 * short requests. 
3007 */ 3008 query_rc = acpi_nfit_query_poison(acpi_desc); 3009 if (query_rc == 0) 3010 ars_complete_all(acpi_desc); 3011 3012 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3013 switch (nfit_spa_type(nfit_spa->spa)) { 3014 case NFIT_SPA_VOLATILE: 3015 case NFIT_SPA_PM: 3016 /* register regions and kick off initial ARS run */ 3017 rc = ars_register(acpi_desc, nfit_spa, &query_rc); 3018 if (rc) 3019 return rc; 3020 break; 3021 case NFIT_SPA_BDW: 3022 /* nothing to register */ 3023 break; 3024 case NFIT_SPA_DCR: 3025 case NFIT_SPA_VDISK: 3026 case NFIT_SPA_VCD: 3027 case NFIT_SPA_PDISK: 3028 case NFIT_SPA_PCD: 3029 /* register known regions that don't support ARS */ 3030 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3031 if (rc) 3032 return rc; 3033 break; 3034 default: 3035 /* don't register unknown regions */ 3036 break; 3037 } 3038 3039 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); 3040 return 0; 3041 } 3042 3043 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3044 struct nfit_table_prev *prev) 3045 { 3046 struct device *dev = acpi_desc->dev; 3047 3048 if (!list_empty(&prev->spas) || 3049 !list_empty(&prev->memdevs) || 3050 !list_empty(&prev->dcrs) || 3051 !list_empty(&prev->bdws) || 3052 !list_empty(&prev->idts) || 3053 !list_empty(&prev->flushes)) { 3054 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3055 return -ENXIO; 3056 } 3057 return 0; 3058 } 3059 3060 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3061 { 3062 struct device *dev = acpi_desc->dev; 3063 struct kernfs_node *nfit; 3064 struct device *bus_dev; 3065 3066 if (!ars_supported(acpi_desc->nvdimm_bus)) 3067 return 0; 3068 3069 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3070 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3071 if (!nfit) { 3072 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3073 return -ENODEV; 3074 } 3075 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3076 sysfs_put(nfit); 3077 if (!acpi_desc->scrub_count_state) { 3078 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3079 return -ENODEV; 3080 } 3081 3082 return 0; 3083 } 3084 3085 static void acpi_nfit_unregister(void *data) 3086 { 3087 struct acpi_nfit_desc *acpi_desc = data; 3088 3089 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3090 } 3091 3092 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3093 { 3094 struct device *dev = acpi_desc->dev; 3095 struct nfit_table_prev prev; 3096 const void *end; 3097 int rc; 3098 3099 if (!acpi_desc->nvdimm_bus) { 3100 acpi_nfit_init_dsms(acpi_desc); 3101 3102 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3103 &acpi_desc->nd_desc); 3104 if (!acpi_desc->nvdimm_bus) 3105 return -ENOMEM; 3106 3107 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3108 acpi_desc); 3109 if (rc) 3110 return rc; 3111 3112 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3113 if (rc) 3114 return rc; 3115 3116 /* register this acpi_desc for mce notifications */ 3117 mutex_lock(&acpi_desc_lock); 3118 list_add_tail(&acpi_desc->list, &acpi_descs); 3119 mutex_unlock(&acpi_desc_lock); 3120 } 3121 3122 mutex_lock(&acpi_desc->init_mutex); 3123 3124 INIT_LIST_HEAD(&prev.spas); 3125 INIT_LIST_HEAD(&prev.memdevs); 3126 INIT_LIST_HEAD(&prev.dcrs); 3127 INIT_LIST_HEAD(&prev.bdws); 3128 INIT_LIST_HEAD(&prev.idts); 3129 INIT_LIST_HEAD(&prev.flushes); 3130 3131 list_cut_position(&prev.spas, &acpi_desc->spas, 3132 acpi_desc->spas.prev); 3133 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3134 
acpi_desc->memdevs.prev); 3135 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 3136 acpi_desc->dcrs.prev); 3137 list_cut_position(&prev.bdws, &acpi_desc->bdws, 3138 acpi_desc->bdws.prev); 3139 list_cut_position(&prev.idts, &acpi_desc->idts, 3140 acpi_desc->idts.prev); 3141 list_cut_position(&prev.flushes, &acpi_desc->flushes, 3142 acpi_desc->flushes.prev); 3143 3144 end = data + sz; 3145 while (!IS_ERR_OR_NULL(data)) 3146 data = add_table(acpi_desc, &prev, data, end); 3147 3148 if (IS_ERR(data)) { 3149 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data)); 3150 rc = PTR_ERR(data); 3151 goto out_unlock; 3152 } 3153 3154 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 3155 if (rc) 3156 goto out_unlock; 3157 3158 rc = nfit_mem_init(acpi_desc); 3159 if (rc) 3160 goto out_unlock; 3161 3162 rc = acpi_nfit_register_dimms(acpi_desc); 3163 if (rc) 3164 goto out_unlock; 3165 3166 rc = acpi_nfit_register_regions(acpi_desc); 3167 3168 out_unlock: 3169 mutex_unlock(&acpi_desc->init_mutex); 3170 return rc; 3171 } 3172 EXPORT_SYMBOL_GPL(acpi_nfit_init); 3173 3174 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3175 { 3176 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3177 struct device *dev = acpi_desc->dev; 3178 3179 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3180 device_lock(dev); 3181 device_unlock(dev); 3182 3183 /* Bounce the init_mutex to complete initial registration */ 3184 mutex_lock(&acpi_desc->init_mutex); 3185 mutex_unlock(&acpi_desc->init_mutex); 3186 3187 return 0; 3188 } 3189 3190 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3191 struct nvdimm *nvdimm, unsigned int cmd) 3192 { 3193 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3194 3195 if (nvdimm) 3196 return 0; 3197 if (cmd != ND_CMD_ARS_START) 3198 return 0; 3199 3200 /* 3201 * The kernel and userspace may race to initiate a scrub, but 3202 * the scrub thread is prepared to lose that initial race. It 3203 * just needs guarantees that any ars it initiates are not 3204 * interrupted by any intervening start requests from userspace. 
3205 */ 3206 if (work_busy(&acpi_desc->dwork.work)) 3207 return -EBUSY; 3208 3209 return 0; 3210 } 3211 3212 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) 3213 { 3214 struct device *dev = acpi_desc->dev; 3215 int scheduled = 0, busy = 0; 3216 struct nfit_spa *nfit_spa; 3217 3218 mutex_lock(&acpi_desc->init_mutex); 3219 if (acpi_desc->cancel) { 3220 mutex_unlock(&acpi_desc->init_mutex); 3221 return 0; 3222 } 3223 3224 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3225 int type = nfit_spa_type(nfit_spa->spa); 3226 3227 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3228 continue; 3229 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3230 continue; 3231 3232 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) 3233 busy++; 3234 else { 3235 if (test_bit(ARS_SHORT, &flags)) 3236 set_bit(ARS_SHORT, &nfit_spa->ars_state); 3237 scheduled++; 3238 } 3239 } 3240 if (scheduled) { 3241 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); 3242 dev_dbg(dev, "ars_scan triggered\n"); 3243 } 3244 mutex_unlock(&acpi_desc->init_mutex); 3245 3246 if (scheduled) 3247 return 0; 3248 if (busy) 3249 return -EBUSY; 3250 return -ENOTTY; 3251 } 3252 3253 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3254 { 3255 struct nvdimm_bus_descriptor *nd_desc; 3256 3257 dev_set_drvdata(dev, acpi_desc); 3258 acpi_desc->dev = dev; 3259 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3260 nd_desc = &acpi_desc->nd_desc; 3261 nd_desc->provider_name = "ACPI.NFIT"; 3262 nd_desc->module = THIS_MODULE; 3263 nd_desc->ndctl = acpi_nfit_ctl; 3264 nd_desc->flush_probe = acpi_nfit_flush_probe; 3265 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3266 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3267 3268 INIT_LIST_HEAD(&acpi_desc->spas); 3269 INIT_LIST_HEAD(&acpi_desc->dcrs); 3270 INIT_LIST_HEAD(&acpi_desc->bdws); 3271 INIT_LIST_HEAD(&acpi_desc->idts); 3272 INIT_LIST_HEAD(&acpi_desc->flushes); 3273 INIT_LIST_HEAD(&acpi_desc->memdevs); 3274 INIT_LIST_HEAD(&acpi_desc->dimms); 3275 INIT_LIST_HEAD(&acpi_desc->list); 3276 mutex_init(&acpi_desc->init_mutex); 3277 acpi_desc->scrub_tmo = 1; 3278 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3279 } 3280 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3281 3282 static void acpi_nfit_put_table(void *table) 3283 { 3284 acpi_put_table(table); 3285 } 3286 3287 void acpi_nfit_shutdown(void *data) 3288 { 3289 struct acpi_nfit_desc *acpi_desc = data; 3290 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3291 3292 /* 3293 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3294 * race teardown 3295 */ 3296 mutex_lock(&acpi_desc_lock); 3297 list_del(&acpi_desc->list); 3298 mutex_unlock(&acpi_desc_lock); 3299 3300 mutex_lock(&acpi_desc->init_mutex); 3301 acpi_desc->cancel = 1; 3302 cancel_delayed_work_sync(&acpi_desc->dwork); 3303 mutex_unlock(&acpi_desc->init_mutex); 3304 3305 /* 3306 * Bounce the nvdimm bus lock to make sure any in-flight 3307 * acpi_nfit_ars_rescan() submissions have had a chance to 3308 * either submit or see ->cancel set. 
3309 */ 3310 device_lock(bus_dev); 3311 device_unlock(bus_dev); 3312 3313 flush_workqueue(nfit_wq); 3314 } 3315 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3316 3317 static int acpi_nfit_add(struct acpi_device *adev) 3318 { 3319 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3320 struct acpi_nfit_desc *acpi_desc; 3321 struct device *dev = &adev->dev; 3322 struct acpi_table_header *tbl; 3323 acpi_status status = AE_OK; 3324 acpi_size sz; 3325 int rc = 0; 3326 3327 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3328 if (ACPI_FAILURE(status)) { 3329 /* This is ok, we could have an nvdimm hotplugged later */ 3330 dev_dbg(dev, "failed to find NFIT at startup\n"); 3331 return 0; 3332 } 3333 3334 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3335 if (rc) 3336 return rc; 3337 sz = tbl->length; 3338 3339 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3340 if (!acpi_desc) 3341 return -ENOMEM; 3342 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3343 3344 /* Save the acpi header for exporting the revision via sysfs */ 3345 acpi_desc->acpi_header = *tbl; 3346 3347 /* Evaluate _FIT and override with that if present */ 3348 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3349 if (ACPI_SUCCESS(status) && buf.length > 0) { 3350 union acpi_object *obj = buf.pointer; 3351 3352 if (obj->type == ACPI_TYPE_BUFFER) 3353 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3354 obj->buffer.length); 3355 else 3356 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3357 (int) obj->type); 3358 kfree(buf.pointer); 3359 } else 3360 /* skip over the lead-in header table */ 3361 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3362 + sizeof(struct acpi_table_nfit), 3363 sz - sizeof(struct acpi_table_nfit)); 3364 3365 if (rc) 3366 return rc; 3367 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3368 } 3369 3370 static int acpi_nfit_remove(struct acpi_device *adev) 3371 { 3372 /* see acpi_nfit_unregister */ 3373 return 0; 3374 } 3375 3376 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3377 { 3378 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3379 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3380 union acpi_object *obj; 3381 acpi_status status; 3382 int ret; 3383 3384 if (!dev->driver) { 3385 /* dev->driver may be null if we're being removed */ 3386 dev_dbg(dev, "no driver found for dev\n"); 3387 return; 3388 } 3389 3390 if (!acpi_desc) { 3391 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3392 if (!acpi_desc) 3393 return; 3394 acpi_nfit_desc_init(acpi_desc, dev); 3395 } else { 3396 /* 3397 * Finish previous registration before considering new 3398 * regions. 3399 */ 3400 flush_workqueue(nfit_wq); 3401 } 3402 3403 /* Evaluate _FIT */ 3404 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3405 if (ACPI_FAILURE(status)) { 3406 dev_err(dev, "failed to evaluate _FIT\n"); 3407 return; 3408 } 3409 3410 obj = buf.pointer; 3411 if (obj->type == ACPI_TYPE_BUFFER) { 3412 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3413 obj->buffer.length); 3414 if (ret) 3415 dev_err(dev, "failed to merge updated NFIT\n"); 3416 } else 3417 dev_err(dev, "Invalid _FIT\n"); 3418 kfree(buf.pointer); 3419 } 3420 3421 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3422 { 3423 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3424 unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? 
3425 0 : 1 << ARS_SHORT; 3426 3427 acpi_nfit_ars_rescan(acpi_desc, flags); 3428 } 3429 3430 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3431 { 3432 dev_dbg(dev, "event: 0x%x\n", event); 3433 3434 switch (event) { 3435 case NFIT_NOTIFY_UPDATE: 3436 return acpi_nfit_update_notify(dev, handle); 3437 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3438 return acpi_nfit_uc_error_notify(dev, handle); 3439 default: 3440 return; 3441 } 3442 } 3443 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3444 3445 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3446 { 3447 device_lock(&adev->dev); 3448 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3449 device_unlock(&adev->dev); 3450 } 3451 3452 static const struct acpi_device_id acpi_nfit_ids[] = { 3453 { "ACPI0012", 0 }, 3454 { "", 0 }, 3455 }; 3456 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3457 3458 static struct acpi_driver acpi_nfit_driver = { 3459 .name = KBUILD_MODNAME, 3460 .ids = acpi_nfit_ids, 3461 .ops = { 3462 .add = acpi_nfit_add, 3463 .remove = acpi_nfit_remove, 3464 .notify = acpi_nfit_notify, 3465 }, 3466 }; 3467 3468 static __init int nfit_init(void) 3469 { 3470 int ret; 3471 3472 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3473 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3474 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3475 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3476 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3477 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3478 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3479 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3480 3481 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3482 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3483 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3484 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3485 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3486 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3487 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3488 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3489 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3490 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3491 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3492 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3493 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3494 3495 nfit_wq = create_singlethread_workqueue("nfit"); 3496 if (!nfit_wq) 3497 return -ENOMEM; 3498 3499 nfit_mce_register(); 3500 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3501 if (ret) { 3502 nfit_mce_unregister(); 3503 destroy_workqueue(nfit_wq); 3504 } 3505 3506 return ret; 3507 3508 } 3509 3510 static __exit void nfit_exit(void) 3511 { 3512 nfit_mce_unregister(); 3513 acpi_bus_unregister_driver(&acpi_nfit_driver); 3514 destroy_workqueue(nfit_wq); 3515 WARN_ON(!list_empty(&acpi_descs)); 3516 } 3517 3518 module_init(nfit_init); 3519 module_exit(nfit_exit); 3520 MODULE_LICENSE("GPL v2"); 3521 MODULE_AUTHOR("Intel Corporation"); 3522