/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

/* translate bus-scope (ARS / clear-error) firmware status into an errno */
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (nfit_mem->has_lsr)
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

/* functions absent from the table below default to DSM revision id 1 */
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

/*
 * Common entry point for bus and dimm commands: marshal the libnvdimm
 * payload into a _DSM (or label method) evaluation and unpack the result.
 */
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
	}

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
		dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& nfit_mem->has_lsw) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				*flags = memdev->flags;
				return memdev->physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		mutex_lock(&acpi_desc->init_mutex);
		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				work_busy(&acpi_desc->dwork.work)
				&& !acpi_desc->cancel ? "+\n" : "\n");
		mutex_unlock(&acpi_desc->init_mutex);
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, 0);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
{
	acpi_handle handle;
	acpi_status status;

	status = acpi_get_handle(adev->handle, method, &handle);

	if (ACPI_SUCCESS(status))
		return true;
	return false;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const guid_t *guid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}
	/*
	 * Record nfit_mem for the notification path to track back to
	 * the nfit sysfs attributes for this dimm device object.
	 */
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this GUID.
	 */
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = NVDIMM_INTEL_CMDMASK;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid,
					nfit_dsm_revid(nfit_mem->family, i),
					1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
		nfit_mem->has_lsr = true;
	}

	if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
		nfit_mem->has_lsw = true;
	}

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm) {
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
			dev_set_drvdata(&adev_dimm->dev, NULL);
		}
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
			/*
			 * These commands have a 1:1 correspondence
			 * between DSM payload and libnvdimm ioctl
			 * payload format.
			 */
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}

		if (nfit_mem->has_lsr) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		}
		if (nfit_mem->has_lsw)
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		if (!nvdimm)
			continue;

		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
1931 */ 1932 enum nfit_aux_cmds { 1933 NFIT_CMD_TRANSLATE_SPA = 5, 1934 NFIT_CMD_ARS_INJECT_SET = 7, 1935 NFIT_CMD_ARS_INJECT_CLEAR = 8, 1936 NFIT_CMD_ARS_INJECT_GET = 9, 1937 }; 1938 1939 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 1940 { 1941 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1942 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); 1943 struct acpi_device *adev; 1944 unsigned long dsm_mask; 1945 int i; 1946 1947 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 1948 nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; 1949 adev = to_acpi_dev(acpi_desc); 1950 if (!adev) 1951 return; 1952 1953 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 1954 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1955 set_bit(i, &nd_desc->cmd_mask); 1956 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); 1957 1958 dsm_mask = 1959 (1 << ND_CMD_ARS_CAP) | 1960 (1 << ND_CMD_ARS_START) | 1961 (1 << ND_CMD_ARS_STATUS) | 1962 (1 << ND_CMD_CLEAR_ERROR) | 1963 (1 << NFIT_CMD_TRANSLATE_SPA) | 1964 (1 << NFIT_CMD_ARS_INJECT_SET) | 1965 (1 << NFIT_CMD_ARS_INJECT_CLEAR) | 1966 (1 << NFIT_CMD_ARS_INJECT_GET); 1967 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1968 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1969 set_bit(i, &nd_desc->bus_dsm_mask); 1970 } 1971 1972 static ssize_t range_index_show(struct device *dev, 1973 struct device_attribute *attr, char *buf) 1974 { 1975 struct nd_region *nd_region = to_nd_region(dev); 1976 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1977 1978 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 1979 } 1980 static DEVICE_ATTR_RO(range_index); 1981 1982 static struct attribute *acpi_nfit_region_attributes[] = { 1983 &dev_attr_range_index.attr, 1984 NULL, 1985 }; 1986 1987 static const struct attribute_group acpi_nfit_region_attribute_group = { 1988 .name = "nfit", 1989 .attrs = acpi_nfit_region_attributes, 1990 }; 1991 1992 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 1993 &nd_region_attribute_group, 1994 &nd_mapping_attribute_group, 1995 &nd_device_attribute_group, 1996 &nd_numa_attribute_group, 1997 &acpi_nfit_region_attribute_group, 1998 NULL, 1999 }; 2000 2001 /* enough info to uniquely specify an interleave set */ 2002 struct nfit_set_info { 2003 struct nfit_set_info_map { 2004 u64 region_offset; 2005 u32 serial_number; 2006 u32 pad; 2007 } mapping[0]; 2008 }; 2009 2010 struct nfit_set_info2 { 2011 struct nfit_set_info_map2 { 2012 u64 region_offset; 2013 u32 serial_number; 2014 u16 vendor_id; 2015 u16 manufacturing_date; 2016 u8 manufacturing_location; 2017 u8 reserved[31]; 2018 } mapping[0]; 2019 }; 2020 2021 static size_t sizeof_nfit_set_info(int num_mappings) 2022 { 2023 return sizeof(struct nfit_set_info) 2024 + num_mappings * sizeof(struct nfit_set_info_map); 2025 } 2026 2027 static size_t sizeof_nfit_set_info2(int num_mappings) 2028 { 2029 return sizeof(struct nfit_set_info2) 2030 + num_mappings * sizeof(struct nfit_set_info_map2); 2031 } 2032 2033 static int cmp_map_compat(const void *m0, const void *m1) 2034 { 2035 const struct nfit_set_info_map *map0 = m0; 2036 const struct nfit_set_info_map *map1 = m1; 2037 2038 return memcmp(&map0->region_offset, &map1->region_offset, 2039 sizeof(u64)); 2040 } 2041 2042 static int cmp_map(const void *m0, const void *m1) 2043 { 2044 const struct nfit_set_info_map *map0 = m0; 2045 const struct nfit_set_info_map *map1 = m1; 2046 2047 if (map0->region_offset < map1->region_offset) 2048 return -1; 2049 else if 
(map0->region_offset > map1->region_offset) 2050 return 1; 2051 return 0; 2052 } 2053 2054 static int cmp_map2(const void *m0, const void *m1) 2055 { 2056 const struct nfit_set_info_map2 *map0 = m0; 2057 const struct nfit_set_info_map2 *map1 = m1; 2058 2059 if (map0->region_offset < map1->region_offset) 2060 return -1; 2061 else if (map0->region_offset > map1->region_offset) 2062 return 1; 2063 return 0; 2064 } 2065 2066 /* Retrieve the nth entry referencing this spa */ 2067 static struct acpi_nfit_memory_map *memdev_from_spa( 2068 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2069 { 2070 struct nfit_memdev *nfit_memdev; 2071 2072 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2073 if (nfit_memdev->memdev->range_index == range_index) 2074 if (n-- == 0) 2075 return nfit_memdev->memdev; 2076 return NULL; 2077 } 2078 2079 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2080 struct nd_region_desc *ndr_desc, 2081 struct acpi_nfit_system_address *spa) 2082 { 2083 struct device *dev = acpi_desc->dev; 2084 struct nd_interleave_set *nd_set; 2085 u16 nr = ndr_desc->num_mappings; 2086 struct nfit_set_info2 *info2; 2087 struct nfit_set_info *info; 2088 int i; 2089 2090 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2091 if (!nd_set) 2092 return -ENOMEM; 2093 ndr_desc->nd_set = nd_set; 2094 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2095 2096 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2097 if (!info) 2098 return -ENOMEM; 2099 2100 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); 2101 if (!info2) 2102 return -ENOMEM; 2103 2104 for (i = 0; i < nr; i++) { 2105 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2106 struct nfit_set_info_map *map = &info->mapping[i]; 2107 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2108 struct nvdimm *nvdimm = mapping->nvdimm; 2109 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2110 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 2111 spa->range_index, i); 2112 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2113 2114 if (!memdev || !nfit_mem->dcr) { 2115 dev_err(dev, "%s: failed to find DCR\n", __func__); 2116 return -ENODEV; 2117 } 2118 2119 map->region_offset = memdev->region_offset; 2120 map->serial_number = dcr->serial_number; 2121 2122 map2->region_offset = memdev->region_offset; 2123 map2->serial_number = dcr->serial_number; 2124 map2->vendor_id = dcr->vendor_id; 2125 map2->manufacturing_date = dcr->manufacturing_date; 2126 map2->manufacturing_location = dcr->manufacturing_location; 2127 } 2128 2129 /* v1.1 namespaces */ 2130 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2131 cmp_map, NULL); 2132 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2133 2134 /* v1.2 namespaces */ 2135 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), 2136 cmp_map2, NULL); 2137 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); 2138 2139 /* support v1.1 namespaces created with the wrong sort order */ 2140 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2141 cmp_map_compat, NULL); 2142 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2143 2144 /* record the result of the sort for the mapping position */ 2145 for (i = 0; i < nr; i++) { 2146 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2147 int j; 2148 2149 for (j = 0; j < nr; j++) { 2150 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2151 struct nvdimm 
*nvdimm = mapping->nvdimm; 2152 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2153 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2154 2155 if (map2->serial_number == dcr->serial_number && 2156 map2->vendor_id == dcr->vendor_id && 2157 map2->manufacturing_date == dcr->manufacturing_date && 2158 map2->manufacturing_location 2159 == dcr->manufacturing_location) { 2160 mapping->position = i; 2161 break; 2162 } 2163 } 2164 } 2165 2166 ndr_desc->nd_set = nd_set; 2167 devm_kfree(dev, info); 2168 devm_kfree(dev, info2); 2169 2170 return 0; 2171 } 2172 2173 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2174 { 2175 struct acpi_nfit_interleave *idt = mmio->idt; 2176 u32 sub_line_offset, line_index, line_offset; 2177 u64 line_no, table_skip_count, table_offset; 2178 2179 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2180 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2181 line_offset = idt->line_offset[line_index] 2182 * mmio->line_size; 2183 table_offset = table_skip_count * mmio->table_size; 2184 2185 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2186 } 2187 2188 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2189 { 2190 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2191 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2192 const u32 STATUS_MASK = 0x80000037; 2193 2194 if (mmio->num_lines) 2195 offset = to_interleave_offset(offset, mmio); 2196 2197 return readl(mmio->addr.base + offset) & STATUS_MASK; 2198 } 2199 2200 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2201 resource_size_t dpa, unsigned int len, unsigned int write) 2202 { 2203 u64 cmd, offset; 2204 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2205 2206 enum { 2207 BCW_OFFSET_MASK = (1ULL << 48)-1, 2208 BCW_LEN_SHIFT = 48, 2209 BCW_LEN_MASK = (1ULL << 8) - 1, 2210 BCW_CMD_SHIFT = 56, 2211 }; 2212 2213 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2214 len = len >> L1_CACHE_SHIFT; 2215 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2216 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2217 2218 offset = nfit_blk->cmd_offset + mmio->size * bw; 2219 if (mmio->num_lines) 2220 offset = to_interleave_offset(offset, mmio); 2221 2222 writeq(cmd, mmio->addr.base + offset); 2223 nvdimm_flush(nfit_blk->nd_region); 2224 2225 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2226 readq(mmio->addr.base + offset); 2227 } 2228 2229 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2230 resource_size_t dpa, void *iobuf, size_t len, int rw, 2231 unsigned int lane) 2232 { 2233 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2234 unsigned int copied = 0; 2235 u64 base_offset; 2236 int rc; 2237 2238 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2239 + lane * mmio->size; 2240 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2241 while (len) { 2242 unsigned int c; 2243 u64 offset; 2244 2245 if (mmio->num_lines) { 2246 u32 line_offset; 2247 2248 offset = to_interleave_offset(base_offset + copied, 2249 mmio); 2250 div_u64_rem(offset, mmio->line_size, &line_offset); 2251 c = min_t(size_t, len, mmio->line_size - line_offset); 2252 } else { 2253 offset = base_offset + nfit_blk->bdw_offset; 2254 c = len; 2255 } 2256 2257 if (rw) 2258 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2259 else { 2260 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2261 arch_invalidate_pmem((void __force *) 2262 mmio->addr.aperture + offset, c); 2263 2264 memcpy(iobuf + 
copied, mmio->addr.aperture + offset, c); 2265 } 2266 2267 copied += c; 2268 len -= c; 2269 } 2270 2271 if (rw) 2272 nvdimm_flush(nfit_blk->nd_region); 2273 2274 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2275 return rc; 2276 } 2277 2278 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2279 resource_size_t dpa, void *iobuf, u64 len, int rw) 2280 { 2281 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2282 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2283 struct nd_region *nd_region = nfit_blk->nd_region; 2284 unsigned int lane, copied = 0; 2285 int rc = 0; 2286 2287 lane = nd_region_acquire_lane(nd_region); 2288 while (len) { 2289 u64 c = min(len, mmio->size); 2290 2291 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2292 iobuf + copied, c, rw, lane); 2293 if (rc) 2294 break; 2295 2296 copied += c; 2297 len -= c; 2298 } 2299 nd_region_release_lane(nd_region, lane); 2300 2301 return rc; 2302 } 2303 2304 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2305 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2306 { 2307 if (idt) { 2308 mmio->num_lines = idt->line_count; 2309 mmio->line_size = idt->line_size; 2310 if (interleave_ways == 0) 2311 return -ENXIO; 2312 mmio->table_size = mmio->num_lines * interleave_ways 2313 * mmio->line_size; 2314 } 2315 2316 return 0; 2317 } 2318 2319 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2320 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2321 { 2322 struct nd_cmd_dimm_flags flags; 2323 int rc; 2324 2325 memset(&flags, 0, sizeof(flags)); 2326 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2327 sizeof(flags), NULL); 2328 2329 if (rc >= 0 && flags.status == 0) 2330 nfit_blk->dimm_flags = flags.flags; 2331 else if (rc == -ENOTTY) { 2332 /* fall back to a conservative default */ 2333 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2334 rc = 0; 2335 } else 2336 rc = -ENXIO; 2337 2338 return rc; 2339 } 2340 2341 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2342 struct device *dev) 2343 { 2344 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2345 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2346 struct nfit_blk_mmio *mmio; 2347 struct nfit_blk *nfit_blk; 2348 struct nfit_mem *nfit_mem; 2349 struct nvdimm *nvdimm; 2350 int rc; 2351 2352 nvdimm = nd_blk_region_to_dimm(ndbr); 2353 nfit_mem = nvdimm_provider_data(nvdimm); 2354 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2355 dev_dbg(dev, "missing%s%s%s\n", 2356 nfit_mem ? "" : " nfit_mem", 2357 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2358 (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); 2359 return -ENXIO; 2360 } 2361 2362 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2363 if (!nfit_blk) 2364 return -ENOMEM; 2365 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2366 nfit_blk->nd_region = to_nd_region(dev); 2367 2368 /* map block aperture memory */ 2369 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2370 mmio = &nfit_blk->mmio[BDW]; 2371 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2372 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2373 if (!mmio->addr.base) { 2374 dev_dbg(dev, "%s failed to map bdw\n", 2375 nvdimm_name(nvdimm)); 2376 return -ENOMEM; 2377 } 2378 mmio->size = nfit_mem->bdw->size; 2379 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2380 mmio->idt = nfit_mem->idt_bdw; 2381 mmio->spa = nfit_mem->spa_bdw; 2382 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2383 nfit_mem->memdev_bdw->interleave_ways); 2384 if (rc) { 2385 dev_dbg(dev, "%s failed to init bdw interleave\n", 2386 nvdimm_name(nvdimm)); 2387 return rc; 2388 } 2389 2390 /* map block control memory */ 2391 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2392 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2393 mmio = &nfit_blk->mmio[DCR]; 2394 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2395 nfit_mem->spa_dcr->length); 2396 if (!mmio->addr.base) { 2397 dev_dbg(dev, "%s failed to map dcr\n", 2398 nvdimm_name(nvdimm)); 2399 return -ENOMEM; 2400 } 2401 mmio->size = nfit_mem->dcr->window_size; 2402 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2403 mmio->idt = nfit_mem->idt_dcr; 2404 mmio->spa = nfit_mem->spa_dcr; 2405 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2406 nfit_mem->memdev_dcr->interleave_ways); 2407 if (rc) { 2408 dev_dbg(dev, "%s failed to init dcr interleave\n", 2409 nvdimm_name(nvdimm)); 2410 return rc; 2411 } 2412 2413 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2414 if (rc < 0) { 2415 dev_dbg(dev, "%s failed get DIMM flags\n", 2416 nvdimm_name(nvdimm)); 2417 return rc; 2418 } 2419 2420 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2421 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2422 2423 if (mmio->line_size == 0) 2424 return 0; 2425 2426 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2427 + 8 > mmio->line_size) { 2428 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2429 return -ENXIO; 2430 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2431 + 8 > mmio->line_size) { 2432 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2433 return -ENXIO; 2434 } 2435 2436 return 0; 2437 } 2438 2439 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2440 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2441 { 2442 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2443 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2444 int cmd_rc, rc; 2445 2446 cmd->address = spa->address; 2447 cmd->length = spa->length; 2448 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2449 sizeof(*cmd), &cmd_rc); 2450 if (rc < 0) 2451 return rc; 2452 return cmd_rc; 2453 } 2454 2455 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) 2456 { 2457 int rc; 2458 int cmd_rc; 2459 struct nd_cmd_ars_start ars_start; 2460 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2461 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2462 2463 memset(&ars_start, 0, sizeof(ars_start)); 2464 ars_start.address = spa->address; 2465 ars_start.length = spa->length; 2466 if 
(test_bit(ARS_SHORT, &nfit_spa->ars_state)) 2467 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2468 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2469 ars_start.type = ND_ARS_PERSISTENT; 2470 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2471 ars_start.type = ND_ARS_VOLATILE; 2472 else 2473 return -ENOTTY; 2474 2475 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2476 sizeof(ars_start), &cmd_rc); 2477 2478 if (rc < 0) 2479 return rc; 2480 return cmd_rc; 2481 } 2482 2483 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2484 { 2485 int rc, cmd_rc; 2486 struct nd_cmd_ars_start ars_start; 2487 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2488 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2489 2490 memset(&ars_start, 0, sizeof(ars_start)); 2491 ars_start.address = ars_status->restart_address; 2492 ars_start.length = ars_status->restart_length; 2493 ars_start.type = ars_status->type; 2494 ars_start.flags = acpi_desc->ars_start_flags; 2495 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2496 sizeof(ars_start), &cmd_rc); 2497 if (rc < 0) 2498 return rc; 2499 return cmd_rc; 2500 } 2501 2502 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2503 { 2504 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2505 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2506 int rc, cmd_rc; 2507 2508 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2509 acpi_desc->max_ars, &cmd_rc); 2510 if (rc < 0) 2511 return rc; 2512 return cmd_rc; 2513 } 2514 2515 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2516 struct nfit_spa *nfit_spa) 2517 { 2518 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2519 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2520 struct nd_region *nd_region = nfit_spa->nd_region; 2521 struct device *dev; 2522 2523 if ((ars_status->address >= spa->address && ars_status->address 2524 < spa->address + spa->length) 2525 || (ars_status->address < spa->address)) { 2526 /* 2527 * Assume that if a scrub starts at an offset from the 2528 * start of nfit_spa that we are in the continuation 2529 * case. 2530 * 2531 * Otherwise, if the scrub covers the spa range, mark 2532 * any pending request complete. 2533 */ 2534 if (ars_status->address + ars_status->length 2535 >= spa->address + spa->length) 2536 /* complete */; 2537 else 2538 return; 2539 } else 2540 return; 2541 2542 if (test_bit(ARS_DONE, &nfit_spa->ars_state)) 2543 return; 2544 2545 if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state)) 2546 return; 2547 2548 if (nd_region) { 2549 dev = nd_region_dev(nd_region); 2550 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2551 } else 2552 dev = acpi_desc->dev; 2553 2554 dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index, 2555 test_bit(ARS_SHORT, &nfit_spa->ars_state) 2556 ? "short" : "long"); 2557 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2558 set_bit(ARS_DONE, &nfit_spa->ars_state); 2559 } 2560 2561 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2562 { 2563 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2564 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2565 int rc; 2566 u32 i; 2567 2568 /* 2569 * First record starts at 44 byte offset from the start of the 2570 * payload. 
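 * Only records that fit entirely within out_length are added to the
 * bad-range list; a partially reported set is flagged as truncated.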
2571 */ 2572 if (ars_status->out_length < 44) 2573 return 0; 2574 for (i = 0; i < ars_status->num_records; i++) { 2575 /* only process full records */ 2576 if (ars_status->out_length 2577 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2578 break; 2579 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2580 ars_status->records[i].err_address, 2581 ars_status->records[i].length); 2582 if (rc) 2583 return rc; 2584 } 2585 if (i < ars_status->num_records) 2586 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2587 2588 return 0; 2589 } 2590 2591 static void acpi_nfit_remove_resource(void *data) 2592 { 2593 struct resource *res = data; 2594 2595 remove_resource(res); 2596 } 2597 2598 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2599 struct nd_region_desc *ndr_desc) 2600 { 2601 struct resource *res, *nd_res = ndr_desc->res; 2602 int is_pmem, ret; 2603 2604 /* No operation if the region is already registered as PMEM */ 2605 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2606 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2607 if (is_pmem == REGION_INTERSECTS) 2608 return 0; 2609 2610 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2611 if (!res) 2612 return -ENOMEM; 2613 2614 res->name = "Persistent Memory"; 2615 res->start = nd_res->start; 2616 res->end = nd_res->end; 2617 res->flags = IORESOURCE_MEM; 2618 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2619 2620 ret = insert_resource(&iomem_resource, res); 2621 if (ret) 2622 return ret; 2623 2624 ret = devm_add_action_or_reset(acpi_desc->dev, 2625 acpi_nfit_remove_resource, 2626 res); 2627 if (ret) 2628 return ret; 2629 2630 return 0; 2631 } 2632 2633 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2634 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2635 struct acpi_nfit_memory_map *memdev, 2636 struct nfit_spa *nfit_spa) 2637 { 2638 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2639 memdev->device_handle); 2640 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2641 struct nd_blk_region_desc *ndbr_desc; 2642 struct nfit_mem *nfit_mem; 2643 int rc; 2644 2645 if (!nvdimm) { 2646 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2647 spa->range_index, memdev->device_handle); 2648 return -ENODEV; 2649 } 2650 2651 mapping->nvdimm = nvdimm; 2652 switch (nfit_spa_type(spa)) { 2653 case NFIT_SPA_PM: 2654 case NFIT_SPA_VOLATILE: 2655 mapping->start = memdev->address; 2656 mapping->size = memdev->region_size; 2657 break; 2658 case NFIT_SPA_DCR: 2659 nfit_mem = nvdimm_provider_data(nvdimm); 2660 if (!nfit_mem || !nfit_mem->bdw) { 2661 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2662 spa->range_index, nvdimm_name(nvdimm)); 2663 break; 2664 } 2665 2666 mapping->size = nfit_mem->bdw->capacity; 2667 mapping->start = nfit_mem->bdw->start_address; 2668 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2669 ndr_desc->mapping = mapping; 2670 ndr_desc->num_mappings = 1; 2671 ndbr_desc = to_blk_region_desc(ndr_desc); 2672 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2673 ndbr_desc->do_io = acpi_desc->blk_do_io; 2674 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2675 if (rc) 2676 return rc; 2677 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2678 ndr_desc); 2679 if (!nfit_spa->nd_region) 2680 return -ENOMEM; 2681 break; 2682 } 2683 2684 return 0; 2685 } 2686 2687 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2688 { 2689 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2690 
nfit_spa_type(spa) == NFIT_SPA_VCD || 2691 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2692 nfit_spa_type(spa) == NFIT_SPA_PCD); 2693 } 2694 2695 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2696 { 2697 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2698 nfit_spa_type(spa) == NFIT_SPA_VCD || 2699 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2700 } 2701 2702 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2703 struct nfit_spa *nfit_spa) 2704 { 2705 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2706 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2707 struct nd_blk_region_desc ndbr_desc; 2708 struct nd_region_desc *ndr_desc; 2709 struct nfit_memdev *nfit_memdev; 2710 struct nvdimm_bus *nvdimm_bus; 2711 struct resource res; 2712 int count = 0, rc; 2713 2714 if (nfit_spa->nd_region) 2715 return 0; 2716 2717 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2718 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2719 return 0; 2720 } 2721 2722 memset(&res, 0, sizeof(res)); 2723 memset(&mappings, 0, sizeof(mappings)); 2724 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2725 res.start = spa->address; 2726 res.end = res.start + spa->length - 1; 2727 ndr_desc = &ndbr_desc.ndr_desc; 2728 ndr_desc->res = &res; 2729 ndr_desc->provider_data = nfit_spa; 2730 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2731 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2732 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2733 spa->proximity_domain); 2734 else 2735 ndr_desc->numa_node = NUMA_NO_NODE; 2736 2737 /* 2738 * Persistence domain bits are hierarchical, if 2739 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 2740 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 2741 */ 2742 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2743 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2744 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 2745 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2746 2747 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2748 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2749 struct nd_mapping_desc *mapping; 2750 2751 if (memdev->range_index != spa->range_index) 2752 continue; 2753 if (count >= ND_MAX_MAPPINGS) { 2754 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2755 spa->range_index, ND_MAX_MAPPINGS); 2756 return -ENXIO; 2757 } 2758 mapping = &mappings[count++]; 2759 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2760 memdev, nfit_spa); 2761 if (rc) 2762 goto out; 2763 } 2764 2765 ndr_desc->mapping = mappings; 2766 ndr_desc->num_mappings = count; 2767 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2768 if (rc) 2769 goto out; 2770 2771 nvdimm_bus = acpi_desc->nvdimm_bus; 2772 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2773 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2774 if (rc) { 2775 dev_warn(acpi_desc->dev, 2776 "failed to insert pmem resource to iomem: %d\n", 2777 rc); 2778 goto out; 2779 } 2780 2781 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2782 ndr_desc); 2783 if (!nfit_spa->nd_region) 2784 rc = -ENOMEM; 2785 } else if (nfit_spa_is_volatile(spa)) { 2786 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2787 ndr_desc); 2788 if (!nfit_spa->nd_region) 2789 rc = -ENOMEM; 2790 } else if (nfit_spa_is_virtual(spa)) { 2791 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2792 ndr_desc); 2793 if (!nfit_spa->nd_region) 2794 rc = -ENOMEM; 2795 } 2796 2797 out: 2798 if 
(rc) 2799 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2800 nfit_spa->spa->range_index); 2801 return rc; 2802 } 2803 2804 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 2805 { 2806 struct device *dev = acpi_desc->dev; 2807 struct nd_cmd_ars_status *ars_status; 2808 2809 if (acpi_desc->ars_status) { 2810 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2811 return 0; 2812 } 2813 2814 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 2815 if (!ars_status) 2816 return -ENOMEM; 2817 acpi_desc->ars_status = ars_status; 2818 return 0; 2819 } 2820 2821 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 2822 { 2823 int rc; 2824 2825 if (ars_status_alloc(acpi_desc)) 2826 return -ENOMEM; 2827 2828 rc = ars_get_status(acpi_desc); 2829 2830 if (rc < 0 && rc != -ENOSPC) 2831 return rc; 2832 2833 if (ars_status_process_records(acpi_desc)) 2834 return -ENOMEM; 2835 2836 return 0; 2837 } 2838 2839 static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, 2840 int *query_rc) 2841 { 2842 int rc = *query_rc; 2843 2844 if (no_init_ars) 2845 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2846 2847 set_bit(ARS_REQ, &nfit_spa->ars_state); 2848 set_bit(ARS_SHORT, &nfit_spa->ars_state); 2849 2850 switch (rc) { 2851 case 0: 2852 case -EAGAIN: 2853 rc = ars_start(acpi_desc, nfit_spa); 2854 if (rc == -EBUSY) { 2855 *query_rc = rc; 2856 break; 2857 } else if (rc == 0) { 2858 rc = acpi_nfit_query_poison(acpi_desc); 2859 } else { 2860 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2861 break; 2862 } 2863 if (rc == -EAGAIN) 2864 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2865 else if (rc == 0) 2866 ars_complete(acpi_desc, nfit_spa); 2867 break; 2868 case -EBUSY: 2869 case -ENOSPC: 2870 break; 2871 default: 2872 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2873 break; 2874 } 2875 2876 if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state)) 2877 set_bit(ARS_REQ, &nfit_spa->ars_state); 2878 2879 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2880 } 2881 2882 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 2883 { 2884 struct nfit_spa *nfit_spa; 2885 2886 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2887 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2888 continue; 2889 ars_complete(acpi_desc, nfit_spa); 2890 } 2891 } 2892 2893 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 2894 int query_rc) 2895 { 2896 unsigned int tmo = acpi_desc->scrub_tmo; 2897 struct device *dev = acpi_desc->dev; 2898 struct nfit_spa *nfit_spa; 2899 2900 if (acpi_desc->cancel) 2901 return 0; 2902 2903 if (query_rc == -EBUSY) { 2904 dev_dbg(dev, "ARS: ARS busy\n"); 2905 return min(30U * 60U, tmo * 2); 2906 } 2907 if (query_rc == -ENOSPC) { 2908 dev_dbg(dev, "ARS: ARS continue\n"); 2909 ars_continue(acpi_desc); 2910 return 1; 2911 } 2912 if (query_rc && query_rc != -EAGAIN) { 2913 unsigned long long addr, end; 2914 2915 addr = acpi_desc->ars_status->address; 2916 end = addr + acpi_desc->ars_status->length; 2917 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 2918 query_rc); 2919 } 2920 2921 ars_complete_all(acpi_desc); 2922 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2923 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2924 continue; 2925 if (test_bit(ARS_REQ, &nfit_spa->ars_state)) { 2926 int rc = ars_start(acpi_desc, nfit_spa); 2927 2928 clear_bit(ARS_DONE, &nfit_spa->ars_state); 2929 dev = nd_region_dev(nfit_spa->nd_region); 2930 dev_dbg(dev, "ARS: range %d ARS start (%d)\n", 
2931 nfit_spa->spa->range_index, rc); 2932 if (rc == 0 || rc == -EBUSY) 2933 return 1; 2934 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 2935 nfit_spa->spa->range_index, rc); 2936 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2937 } 2938 } 2939 return 0; 2940 } 2941 2942 static void acpi_nfit_scrub(struct work_struct *work) 2943 { 2944 struct acpi_nfit_desc *acpi_desc; 2945 unsigned int tmo; 2946 int query_rc; 2947 2948 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 2949 mutex_lock(&acpi_desc->init_mutex); 2950 query_rc = acpi_nfit_query_poison(acpi_desc); 2951 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 2952 if (tmo) { 2953 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 2954 acpi_desc->scrub_tmo = tmo; 2955 } else { 2956 acpi_desc->scrub_count++; 2957 if (acpi_desc->scrub_count_state) 2958 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2959 } 2960 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2961 mutex_unlock(&acpi_desc->init_mutex); 2962 } 2963 2964 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 2965 struct nfit_spa *nfit_spa) 2966 { 2967 int type = nfit_spa_type(nfit_spa->spa); 2968 struct nd_cmd_ars_cap ars_cap; 2969 int rc; 2970 2971 memset(&ars_cap, 0, sizeof(ars_cap)); 2972 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 2973 if (rc < 0) 2974 return; 2975 /* check that the supported scrub types match the spa type */ 2976 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 2977 & ND_ARS_VOLATILE) == 0) 2978 return; 2979 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 2980 & ND_ARS_PERSISTENT) == 0) 2981 return; 2982 2983 nfit_spa->max_ars = ars_cap.max_ars_out; 2984 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 2985 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 2986 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 2987 set_bit(ARS_REQ, &nfit_spa->ars_state); 2988 } 2989 2990 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 2991 { 2992 struct nfit_spa *nfit_spa; 2993 int rc, query_rc; 2994 2995 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2996 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2997 switch (nfit_spa_type(nfit_spa->spa)) { 2998 case NFIT_SPA_VOLATILE: 2999 case NFIT_SPA_PM: 3000 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3001 break; 3002 } 3003 } 3004 3005 /* 3006 * Reap any results that might be pending before starting new 3007 * short requests. 
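 * The query result is handed to ars_register() below so each
 * persistent / volatile range can decide whether to kick off a new
 * scrub or simply register its region.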
3008 */ 3009 query_rc = acpi_nfit_query_poison(acpi_desc); 3010 if (query_rc == 0) 3011 ars_complete_all(acpi_desc); 3012 3013 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3014 switch (nfit_spa_type(nfit_spa->spa)) { 3015 case NFIT_SPA_VOLATILE: 3016 case NFIT_SPA_PM: 3017 /* register regions and kick off initial ARS run */ 3018 rc = ars_register(acpi_desc, nfit_spa, &query_rc); 3019 if (rc) 3020 return rc; 3021 break; 3022 case NFIT_SPA_BDW: 3023 /* nothing to register */ 3024 break; 3025 case NFIT_SPA_DCR: 3026 case NFIT_SPA_VDISK: 3027 case NFIT_SPA_VCD: 3028 case NFIT_SPA_PDISK: 3029 case NFIT_SPA_PCD: 3030 /* register known regions that don't support ARS */ 3031 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3032 if (rc) 3033 return rc; 3034 break; 3035 default: 3036 /* don't register unknown regions */ 3037 break; 3038 } 3039 3040 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); 3041 return 0; 3042 } 3043 3044 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3045 struct nfit_table_prev *prev) 3046 { 3047 struct device *dev = acpi_desc->dev; 3048 3049 if (!list_empty(&prev->spas) || 3050 !list_empty(&prev->memdevs) || 3051 !list_empty(&prev->dcrs) || 3052 !list_empty(&prev->bdws) || 3053 !list_empty(&prev->idts) || 3054 !list_empty(&prev->flushes)) { 3055 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3056 return -ENXIO; 3057 } 3058 return 0; 3059 } 3060 3061 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3062 { 3063 struct device *dev = acpi_desc->dev; 3064 struct kernfs_node *nfit; 3065 struct device *bus_dev; 3066 3067 if (!ars_supported(acpi_desc->nvdimm_bus)) 3068 return 0; 3069 3070 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3071 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3072 if (!nfit) { 3073 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3074 return -ENODEV; 3075 } 3076 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3077 sysfs_put(nfit); 3078 if (!acpi_desc->scrub_count_state) { 3079 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3080 return -ENODEV; 3081 } 3082 3083 return 0; 3084 } 3085 3086 static void acpi_nfit_unregister(void *data) 3087 { 3088 struct acpi_nfit_desc *acpi_desc = data; 3089 3090 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3091 } 3092 3093 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3094 { 3095 struct device *dev = acpi_desc->dev; 3096 struct nfit_table_prev prev; 3097 const void *end; 3098 int rc; 3099 3100 if (!acpi_desc->nvdimm_bus) { 3101 acpi_nfit_init_dsms(acpi_desc); 3102 3103 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3104 &acpi_desc->nd_desc); 3105 if (!acpi_desc->nvdimm_bus) 3106 return -ENOMEM; 3107 3108 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3109 acpi_desc); 3110 if (rc) 3111 return rc; 3112 3113 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3114 if (rc) 3115 return rc; 3116 3117 /* register this acpi_desc for mce notifications */ 3118 mutex_lock(&acpi_desc_lock); 3119 list_add_tail(&acpi_desc->list, &acpi_descs); 3120 mutex_unlock(&acpi_desc_lock); 3121 } 3122 3123 mutex_lock(&acpi_desc->init_mutex); 3124 3125 INIT_LIST_HEAD(&prev.spas); 3126 INIT_LIST_HEAD(&prev.memdevs); 3127 INIT_LIST_HEAD(&prev.dcrs); 3128 INIT_LIST_HEAD(&prev.bdws); 3129 INIT_LIST_HEAD(&prev.idts); 3130 INIT_LIST_HEAD(&prev.flushes); 3131 3132 list_cut_position(&prev.spas, &acpi_desc->spas, 3133 acpi_desc->spas.prev); 3134 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3135 
acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
			acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
			acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
			acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
			acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race. It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
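	 * Hence a userspace-initiated ARS start is turned away with
	 * -EBUSY while the scrub work is still pending or running.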
3206 */ 3207 if (work_busy(&acpi_desc->dwork.work)) 3208 return -EBUSY; 3209 3210 return 0; 3211 } 3212 3213 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) 3214 { 3215 struct device *dev = acpi_desc->dev; 3216 int scheduled = 0, busy = 0; 3217 struct nfit_spa *nfit_spa; 3218 3219 mutex_lock(&acpi_desc->init_mutex); 3220 if (acpi_desc->cancel) { 3221 mutex_unlock(&acpi_desc->init_mutex); 3222 return 0; 3223 } 3224 3225 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3226 int type = nfit_spa_type(nfit_spa->spa); 3227 3228 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3229 continue; 3230 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3231 continue; 3232 3233 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) 3234 busy++; 3235 else { 3236 if (test_bit(ARS_SHORT, &flags)) 3237 set_bit(ARS_SHORT, &nfit_spa->ars_state); 3238 scheduled++; 3239 } 3240 } 3241 if (scheduled) { 3242 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); 3243 dev_dbg(dev, "ars_scan triggered\n"); 3244 } 3245 mutex_unlock(&acpi_desc->init_mutex); 3246 3247 if (scheduled) 3248 return 0; 3249 if (busy) 3250 return -EBUSY; 3251 return -ENOTTY; 3252 } 3253 3254 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3255 { 3256 struct nvdimm_bus_descriptor *nd_desc; 3257 3258 dev_set_drvdata(dev, acpi_desc); 3259 acpi_desc->dev = dev; 3260 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3261 nd_desc = &acpi_desc->nd_desc; 3262 nd_desc->provider_name = "ACPI.NFIT"; 3263 nd_desc->module = THIS_MODULE; 3264 nd_desc->ndctl = acpi_nfit_ctl; 3265 nd_desc->flush_probe = acpi_nfit_flush_probe; 3266 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3267 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3268 3269 INIT_LIST_HEAD(&acpi_desc->spas); 3270 INIT_LIST_HEAD(&acpi_desc->dcrs); 3271 INIT_LIST_HEAD(&acpi_desc->bdws); 3272 INIT_LIST_HEAD(&acpi_desc->idts); 3273 INIT_LIST_HEAD(&acpi_desc->flushes); 3274 INIT_LIST_HEAD(&acpi_desc->memdevs); 3275 INIT_LIST_HEAD(&acpi_desc->dimms); 3276 INIT_LIST_HEAD(&acpi_desc->list); 3277 mutex_init(&acpi_desc->init_mutex); 3278 acpi_desc->scrub_tmo = 1; 3279 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3280 } 3281 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3282 3283 static void acpi_nfit_put_table(void *table) 3284 { 3285 acpi_put_table(table); 3286 } 3287 3288 void acpi_nfit_shutdown(void *data) 3289 { 3290 struct acpi_nfit_desc *acpi_desc = data; 3291 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3292 3293 /* 3294 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3295 * race teardown 3296 */ 3297 mutex_lock(&acpi_desc_lock); 3298 list_del(&acpi_desc->list); 3299 mutex_unlock(&acpi_desc_lock); 3300 3301 mutex_lock(&acpi_desc->init_mutex); 3302 acpi_desc->cancel = 1; 3303 cancel_delayed_work_sync(&acpi_desc->dwork); 3304 mutex_unlock(&acpi_desc->init_mutex); 3305 3306 /* 3307 * Bounce the nvdimm bus lock to make sure any in-flight 3308 * acpi_nfit_ars_rescan() submissions have had a chance to 3309 * either submit or see ->cancel set. 
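 * The flush_workqueue() call below then drains any scrub work that
 * did manage to get queued.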
3310 */ 3311 device_lock(bus_dev); 3312 device_unlock(bus_dev); 3313 3314 flush_workqueue(nfit_wq); 3315 } 3316 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3317 3318 static int acpi_nfit_add(struct acpi_device *adev) 3319 { 3320 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3321 struct acpi_nfit_desc *acpi_desc; 3322 struct device *dev = &adev->dev; 3323 struct acpi_table_header *tbl; 3324 acpi_status status = AE_OK; 3325 acpi_size sz; 3326 int rc = 0; 3327 3328 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3329 if (ACPI_FAILURE(status)) { 3330 /* This is ok, we could have an nvdimm hotplugged later */ 3331 dev_dbg(dev, "failed to find NFIT at startup\n"); 3332 return 0; 3333 } 3334 3335 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3336 if (rc) 3337 return rc; 3338 sz = tbl->length; 3339 3340 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3341 if (!acpi_desc) 3342 return -ENOMEM; 3343 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3344 3345 /* Save the acpi header for exporting the revision via sysfs */ 3346 acpi_desc->acpi_header = *tbl; 3347 3348 /* Evaluate _FIT and override with that if present */ 3349 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3350 if (ACPI_SUCCESS(status) && buf.length > 0) { 3351 union acpi_object *obj = buf.pointer; 3352 3353 if (obj->type == ACPI_TYPE_BUFFER) 3354 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3355 obj->buffer.length); 3356 else 3357 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3358 (int) obj->type); 3359 kfree(buf.pointer); 3360 } else 3361 /* skip over the lead-in header table */ 3362 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3363 + sizeof(struct acpi_table_nfit), 3364 sz - sizeof(struct acpi_table_nfit)); 3365 3366 if (rc) 3367 return rc; 3368 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3369 } 3370 3371 static int acpi_nfit_remove(struct acpi_device *adev) 3372 { 3373 /* see acpi_nfit_unregister */ 3374 return 0; 3375 } 3376 3377 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3378 { 3379 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3380 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3381 union acpi_object *obj; 3382 acpi_status status; 3383 int ret; 3384 3385 if (!dev->driver) { 3386 /* dev->driver may be null if we're being removed */ 3387 dev_dbg(dev, "no driver found for dev\n"); 3388 return; 3389 } 3390 3391 if (!acpi_desc) { 3392 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3393 if (!acpi_desc) 3394 return; 3395 acpi_nfit_desc_init(acpi_desc, dev); 3396 } else { 3397 /* 3398 * Finish previous registration before considering new 3399 * regions. 3400 */ 3401 flush_workqueue(nfit_wq); 3402 } 3403 3404 /* Evaluate _FIT */ 3405 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3406 if (ACPI_FAILURE(status)) { 3407 dev_err(dev, "failed to evaluate _FIT\n"); 3408 return; 3409 } 3410 3411 obj = buf.pointer; 3412 if (obj->type == ACPI_TYPE_BUFFER) { 3413 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3414 obj->buffer.length); 3415 if (ret) 3416 dev_err(dev, "failed to merge updated NFIT\n"); 3417 } else 3418 dev_err(dev, "Invalid _FIT\n"); 3419 kfree(buf.pointer); 3420 } 3421 3422 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3423 { 3424 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3425 unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? 
3426 0 : 1 << ARS_SHORT; 3427 3428 acpi_nfit_ars_rescan(acpi_desc, flags); 3429 } 3430 3431 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3432 { 3433 dev_dbg(dev, "event: 0x%x\n", event); 3434 3435 switch (event) { 3436 case NFIT_NOTIFY_UPDATE: 3437 return acpi_nfit_update_notify(dev, handle); 3438 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3439 return acpi_nfit_uc_error_notify(dev, handle); 3440 default: 3441 return; 3442 } 3443 } 3444 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3445 3446 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3447 { 3448 device_lock(&adev->dev); 3449 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3450 device_unlock(&adev->dev); 3451 } 3452 3453 static const struct acpi_device_id acpi_nfit_ids[] = { 3454 { "ACPI0012", 0 }, 3455 { "", 0 }, 3456 }; 3457 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3458 3459 static struct acpi_driver acpi_nfit_driver = { 3460 .name = KBUILD_MODNAME, 3461 .ids = acpi_nfit_ids, 3462 .ops = { 3463 .add = acpi_nfit_add, 3464 .remove = acpi_nfit_remove, 3465 .notify = acpi_nfit_notify, 3466 }, 3467 }; 3468 3469 static __init int nfit_init(void) 3470 { 3471 int ret; 3472 3473 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3474 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3475 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3476 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3477 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3478 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3479 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3480 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3481 3482 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3483 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3484 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3485 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3486 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3487 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3488 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3489 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3490 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3491 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3492 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3493 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3494 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3495 3496 nfit_wq = create_singlethread_workqueue("nfit"); 3497 if (!nfit_wq) 3498 return -ENOMEM; 3499 3500 nfit_mce_register(); 3501 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3502 if (ret) { 3503 nfit_mce_unregister(); 3504 destroy_workqueue(nfit_wq); 3505 } 3506 3507 return ret; 3508 3509 } 3510 3511 static __exit void nfit_exit(void) 3512 { 3513 nfit_mce_unregister(); 3514 acpi_bus_unregister_driver(&acpi_nfit_driver); 3515 destroy_workqueue(nfit_wq); 3516 WARN_ON(!list_empty(&acpi_descs)); 3517 } 3518 3519 module_init(nfit_init); 3520 module_exit(nfit_exit); 3521 MODULE_LICENSE("GPL v2"); 3522 MODULE_AUTHOR("Intel Corporation"); 3523
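MODULE_DESCRIPTION("ACPI NFIT (NVDIMM Firmware Interface Table) driver");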