/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

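/*
 * Translate bus-scope DSM status results into errno values.  As encoded
 * in the checks below, the low 16 bits of 'status' carry the command
 * completion code and the upper 16 bits carry command-specific extended
 * status.
 */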
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

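/*
 * Locked label areas are reported in two ways: the _LSR/_LSW label
 * methods return a status value of 3 (ACPI_LABELS_LOCKED), while the
 * legacy _DSM label commands set ND_CONFIG_LOCKED in the extended
 * status word.  Both cases are mapped to -EACCES below.
 */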
#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (nfit_mem->has_lsr)
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

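/*
 * Wrappers for the named NVDIMM label methods.  _LSW takes an
 * (offset, length, data) argument list, _LSR takes (offset, length),
 * and _LSI takes no arguments; the package/integer results are
 * flattened by the helpers above into the buffer format that
 * acpi_nfit_ctl() consumes.
 */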
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

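/*
 * Command dispatch for bus-scope and DIMM-scope ioctls: validate the
 * command against cmd_mask/dsm_mask, marshal the input payload, prefer
 * the named _LSI/_LSR/_LSW methods over _DSM when the DIMM advertises
 * them, then unpack the result buffer and translate the firmware status
 * word via xlat_status().  Outside of ND_CMD_CALL the command number is
 * used directly as the DSM function index (func = cmd).
 */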
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
	}

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
			dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& nfit_mem->has_lsw) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

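/*
 * Map an NFIT device handle to its SMBIOS physical id, reporting the
 * memdev flags as a side effect.  Exported so that consumers outside
 * the NFIT driver can correlate DIMMs by device handle.
 */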
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				*flags = memdev->flags;
				return memdev->physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

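/*
 * Parse one NFIT sub-table: dispatch on the header type to the add_*
 * helpers above (re-using unchanged entries from the previous scan),
 * and return a pointer to the next table, NULL when the buffer or a
 * zero-length table ends the walk, or ERR_PTR(-ENOMEM) on allocation
 * failure.
 */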
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
934 u16 dcr = nfit_mem->dcr->region_index; 935 struct nfit_spa *nfit_spa; 936 937 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 938 u16 range_index = nfit_spa->spa->range_index; 939 int type = nfit_spa_type(nfit_spa->spa); 940 struct nfit_memdev *nfit_memdev; 941 942 if (type != NFIT_SPA_BDW) 943 continue; 944 945 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 946 if (nfit_memdev->memdev->range_index != range_index) 947 continue; 948 if (nfit_memdev->memdev->device_handle != device_handle) 949 continue; 950 if (nfit_memdev->memdev->region_index != dcr) 951 continue; 952 953 nfit_mem->spa_bdw = nfit_spa->spa; 954 return; 955 } 956 } 957 958 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 959 nfit_mem->spa_dcr->range_index); 960 nfit_mem->bdw = NULL; 961 } 962 963 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, 964 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 965 { 966 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 967 struct nfit_memdev *nfit_memdev; 968 struct nfit_bdw *nfit_bdw; 969 struct nfit_idt *nfit_idt; 970 u16 idt_idx, range_index; 971 972 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 973 if (nfit_bdw->bdw->region_index != dcr) 974 continue; 975 nfit_mem->bdw = nfit_bdw->bdw; 976 break; 977 } 978 979 if (!nfit_mem->bdw) 980 return; 981 982 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 983 984 if (!nfit_mem->spa_bdw) 985 return; 986 987 range_index = nfit_mem->spa_bdw->range_index; 988 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 989 if (nfit_memdev->memdev->range_index != range_index || 990 nfit_memdev->memdev->region_index != dcr) 991 continue; 992 nfit_mem->memdev_bdw = nfit_memdev->memdev; 993 idt_idx = nfit_memdev->memdev->interleave_index; 994 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 995 if (nfit_idt->idt->interleave_index != idt_idx) 996 continue; 997 nfit_mem->idt_bdw = nfit_idt->idt; 998 break; 999 } 1000 break; 1001 } 1002 } 1003 1004 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, 1005 struct acpi_nfit_system_address *spa) 1006 { 1007 struct nfit_mem *nfit_mem, *found; 1008 struct nfit_memdev *nfit_memdev; 1009 int type = spa ? nfit_spa_type(spa) : 0; 1010 1011 switch (type) { 1012 case NFIT_SPA_DCR: 1013 case NFIT_SPA_PM: 1014 break; 1015 default: 1016 if (spa) 1017 return 0; 1018 } 1019 1020 /* 1021 * This loop runs in two modes, when a dimm is mapped the loop 1022 * adds memdev associations to an existing dimm, or creates a 1023 * dimm. In the unmapped dimm case this loop sweeps for memdev 1024 * instances with an invalid / zero range_index and adds those 1025 * dimms without spa associations. 
1026 */ 1027 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1028 struct nfit_flush *nfit_flush; 1029 struct nfit_dcr *nfit_dcr; 1030 u32 device_handle; 1031 u16 dcr; 1032 1033 if (spa && nfit_memdev->memdev->range_index != spa->range_index) 1034 continue; 1035 if (!spa && nfit_memdev->memdev->range_index) 1036 continue; 1037 found = NULL; 1038 dcr = nfit_memdev->memdev->region_index; 1039 device_handle = nfit_memdev->memdev->device_handle; 1040 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1041 if (__to_nfit_memdev(nfit_mem)->device_handle 1042 == device_handle) { 1043 found = nfit_mem; 1044 break; 1045 } 1046 1047 if (found) 1048 nfit_mem = found; 1049 else { 1050 nfit_mem = devm_kzalloc(acpi_desc->dev, 1051 sizeof(*nfit_mem), GFP_KERNEL); 1052 if (!nfit_mem) 1053 return -ENOMEM; 1054 INIT_LIST_HEAD(&nfit_mem->list); 1055 nfit_mem->acpi_desc = acpi_desc; 1056 list_add(&nfit_mem->list, &acpi_desc->dimms); 1057 } 1058 1059 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1060 if (nfit_dcr->dcr->region_index != dcr) 1061 continue; 1062 /* 1063 * Record the control region for the dimm. For 1064 * the ACPI 6.1 case, where there are separate 1065 * control regions for the pmem vs blk 1066 * interfaces, be sure to record the extended 1067 * blk details. 1068 */ 1069 if (!nfit_mem->dcr) 1070 nfit_mem->dcr = nfit_dcr->dcr; 1071 else if (nfit_mem->dcr->windows == 0 1072 && nfit_dcr->dcr->windows) 1073 nfit_mem->dcr = nfit_dcr->dcr; 1074 break; 1075 } 1076 1077 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 1078 struct acpi_nfit_flush_address *flush; 1079 u16 i; 1080 1081 if (nfit_flush->flush->device_handle != device_handle) 1082 continue; 1083 nfit_mem->nfit_flush = nfit_flush; 1084 flush = nfit_flush->flush; 1085 nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, 1086 flush->hint_count 1087 * sizeof(struct resource), GFP_KERNEL); 1088 if (!nfit_mem->flush_wpq) 1089 return -ENOMEM; 1090 for (i = 0; i < flush->hint_count; i++) { 1091 struct resource *res = &nfit_mem->flush_wpq[i]; 1092 1093 res->start = flush->hint_address[i]; 1094 res->end = res->start + 8 - 1; 1095 } 1096 break; 1097 } 1098 1099 if (dcr && !nfit_mem->dcr) { 1100 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", 1101 spa->range_index, dcr); 1102 return -ENODEV; 1103 } 1104 1105 if (type == NFIT_SPA_DCR) { 1106 struct nfit_idt *nfit_idt; 1107 u16 idt_idx; 1108 1109 /* multiple dimms may share a SPA when interleaved */ 1110 nfit_mem->spa_dcr = spa; 1111 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1112 idt_idx = nfit_memdev->memdev->interleave_index; 1113 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 1114 if (nfit_idt->idt->interleave_index != idt_idx) 1115 continue; 1116 nfit_mem->idt_dcr = nfit_idt->idt; 1117 break; 1118 } 1119 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); 1120 } else if (type == NFIT_SPA_PM) { 1121 /* 1122 * A single dimm may belong to multiple SPA-PM 1123 * ranges, record at least one in addition to 1124 * any SPA-DCR range. 
1125 */ 1126 nfit_mem->memdev_pmem = nfit_memdev->memdev; 1127 } else 1128 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1129 } 1130 1131 return 0; 1132 } 1133 1134 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 1135 { 1136 struct nfit_mem *a = container_of(_a, typeof(*a), list); 1137 struct nfit_mem *b = container_of(_b, typeof(*b), list); 1138 u32 handleA, handleB; 1139 1140 handleA = __to_nfit_memdev(a)->device_handle; 1141 handleB = __to_nfit_memdev(b)->device_handle; 1142 if (handleA < handleB) 1143 return -1; 1144 else if (handleA > handleB) 1145 return 1; 1146 return 0; 1147 } 1148 1149 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 1150 { 1151 struct nfit_spa *nfit_spa; 1152 int rc; 1153 1154 1155 /* 1156 * For each SPA-DCR or SPA-PMEM address range find its 1157 * corresponding MEMDEV(s). From each MEMDEV find the 1158 * corresponding DCR. Then, if we're operating on a SPA-DCR, 1159 * try to find a SPA-BDW and a corresponding BDW that references 1160 * the DCR. Throw it all into an nfit_mem object. Note, that 1161 * BDWs are optional. 1162 */ 1163 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1164 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); 1165 if (rc) 1166 return rc; 1167 } 1168 1169 /* 1170 * If a DIMM has failed to be mapped into SPA there will be no 1171 * SPA entries above. Find and register all the unmapped DIMMs 1172 * for reporting and recovery purposes. 1173 */ 1174 rc = __nfit_mem_init(acpi_desc, NULL); 1175 if (rc) 1176 return rc; 1177 1178 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 1179 1180 return 0; 1181 } 1182 1183 static ssize_t bus_dsm_mask_show(struct device *dev, 1184 struct device_attribute *attr, char *buf) 1185 { 1186 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1187 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1188 1189 return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); 1190 } 1191 static struct device_attribute dev_attr_bus_dsm_mask = 1192 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); 1193 1194 static ssize_t revision_show(struct device *dev, 1195 struct device_attribute *attr, char *buf) 1196 { 1197 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1198 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1199 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1200 1201 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); 1202 } 1203 static DEVICE_ATTR_RO(revision); 1204 1205 static ssize_t hw_error_scrub_show(struct device *dev, 1206 struct device_attribute *attr, char *buf) 1207 { 1208 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1209 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1210 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1211 1212 return sprintf(buf, "%d\n", acpi_desc->scrub_mode); 1213 } 1214 1215 /* 1216 * The 'hw_error_scrub' attribute can have the following values written to it: 1217 * '0': Switch to the default mode where an exception will only insert 1218 * the address of the memory error into the poison and badblocks lists. 1219 * '1': Enable a full scrub to happen if an exception for a memory error is 1220 * received. 
1221 */ 1222 static ssize_t hw_error_scrub_store(struct device *dev, 1223 struct device_attribute *attr, const char *buf, size_t size) 1224 { 1225 struct nvdimm_bus_descriptor *nd_desc; 1226 ssize_t rc; 1227 long val; 1228 1229 rc = kstrtol(buf, 0, &val); 1230 if (rc) 1231 return rc; 1232 1233 device_lock(dev); 1234 nd_desc = dev_get_drvdata(dev); 1235 if (nd_desc) { 1236 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1237 1238 switch (val) { 1239 case HW_ERROR_SCRUB_ON: 1240 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; 1241 break; 1242 case HW_ERROR_SCRUB_OFF: 1243 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; 1244 break; 1245 default: 1246 rc = -EINVAL; 1247 break; 1248 } 1249 } 1250 device_unlock(dev); 1251 if (rc) 1252 return rc; 1253 return size; 1254 } 1255 static DEVICE_ATTR_RW(hw_error_scrub); 1256 1257 /* 1258 * This shows the number of full Address Range Scrubs that have been 1259 * completed since driver load time. Userspace can wait on this using 1260 * select/poll etc. A '+' at the end indicates an ARS is in progress 1261 */ 1262 static ssize_t scrub_show(struct device *dev, 1263 struct device_attribute *attr, char *buf) 1264 { 1265 struct nvdimm_bus_descriptor *nd_desc; 1266 ssize_t rc = -ENXIO; 1267 1268 device_lock(dev); 1269 nd_desc = dev_get_drvdata(dev); 1270 if (nd_desc) { 1271 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1272 1273 mutex_lock(&acpi_desc->init_mutex); 1274 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, 1275 work_busy(&acpi_desc->dwork.work) 1276 && !acpi_desc->cancel ? "+\n" : "\n"); 1277 mutex_unlock(&acpi_desc->init_mutex); 1278 } 1279 device_unlock(dev); 1280 return rc; 1281 } 1282 1283 static ssize_t scrub_store(struct device *dev, 1284 struct device_attribute *attr, const char *buf, size_t size) 1285 { 1286 struct nvdimm_bus_descriptor *nd_desc; 1287 ssize_t rc; 1288 long val; 1289 1290 rc = kstrtol(buf, 0, &val); 1291 if (rc) 1292 return rc; 1293 if (val != 1) 1294 return -EINVAL; 1295 1296 device_lock(dev); 1297 nd_desc = dev_get_drvdata(dev); 1298 if (nd_desc) { 1299 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1300 1301 rc = acpi_nfit_ars_rescan(acpi_desc, 0); 1302 } 1303 device_unlock(dev); 1304 if (rc) 1305 return rc; 1306 return size; 1307 } 1308 static DEVICE_ATTR_RW(scrub); 1309 1310 static bool ars_supported(struct nvdimm_bus *nvdimm_bus) 1311 { 1312 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1313 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START 1314 | 1 << ND_CMD_ARS_STATUS; 1315 1316 return (nd_desc->cmd_mask & mask) == mask; 1317 } 1318 1319 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) 1320 { 1321 struct device *dev = container_of(kobj, struct device, kobj); 1322 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1323 1324 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) 1325 return 0; 1326 return a->mode; 1327 } 1328 1329 static struct attribute *acpi_nfit_attributes[] = { 1330 &dev_attr_revision.attr, 1331 &dev_attr_scrub.attr, 1332 &dev_attr_hw_error_scrub.attr, 1333 &dev_attr_bus_dsm_mask.attr, 1334 NULL, 1335 }; 1336 1337 static const struct attribute_group acpi_nfit_attribute_group = { 1338 .name = "nfit", 1339 .attrs = acpi_nfit_attributes, 1340 .is_visible = nfit_visible, 1341 }; 1342 1343 static const struct attribute_group *acpi_nfit_attribute_groups[] = { 1344 &nvdimm_bus_attribute_group, 1345 &acpi_nfit_attribute_group, 1346 NULL, 1347 }; 1348 1349 static struct acpi_nfit_memory_map 
*to_nfit_memdev(struct device *dev) 1350 { 1351 struct nvdimm *nvdimm = to_nvdimm(dev); 1352 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1353 1354 return __to_nfit_memdev(nfit_mem); 1355 } 1356 1357 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 1358 { 1359 struct nvdimm *nvdimm = to_nvdimm(dev); 1360 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1361 1362 return nfit_mem->dcr; 1363 } 1364 1365 static ssize_t handle_show(struct device *dev, 1366 struct device_attribute *attr, char *buf) 1367 { 1368 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1369 1370 return sprintf(buf, "%#x\n", memdev->device_handle); 1371 } 1372 static DEVICE_ATTR_RO(handle); 1373 1374 static ssize_t phys_id_show(struct device *dev, 1375 struct device_attribute *attr, char *buf) 1376 { 1377 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1378 1379 return sprintf(buf, "%#x\n", memdev->physical_id); 1380 } 1381 static DEVICE_ATTR_RO(phys_id); 1382 1383 static ssize_t vendor_show(struct device *dev, 1384 struct device_attribute *attr, char *buf) 1385 { 1386 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1387 1388 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1389 } 1390 static DEVICE_ATTR_RO(vendor); 1391 1392 static ssize_t rev_id_show(struct device *dev, 1393 struct device_attribute *attr, char *buf) 1394 { 1395 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1396 1397 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1398 } 1399 static DEVICE_ATTR_RO(rev_id); 1400 1401 static ssize_t device_show(struct device *dev, 1402 struct device_attribute *attr, char *buf) 1403 { 1404 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1405 1406 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1407 } 1408 static DEVICE_ATTR_RO(device); 1409 1410 static ssize_t subsystem_vendor_show(struct device *dev, 1411 struct device_attribute *attr, char *buf) 1412 { 1413 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1414 1415 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1416 } 1417 static DEVICE_ATTR_RO(subsystem_vendor); 1418 1419 static ssize_t subsystem_rev_id_show(struct device *dev, 1420 struct device_attribute *attr, char *buf) 1421 { 1422 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1423 1424 return sprintf(buf, "0x%04x\n", 1425 be16_to_cpu(dcr->subsystem_revision_id)); 1426 } 1427 static DEVICE_ATTR_RO(subsystem_rev_id); 1428 1429 static ssize_t subsystem_device_show(struct device *dev, 1430 struct device_attribute *attr, char *buf) 1431 { 1432 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1433 1434 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1435 } 1436 static DEVICE_ATTR_RO(subsystem_device); 1437 1438 static int num_nvdimm_formats(struct nvdimm *nvdimm) 1439 { 1440 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1441 int formats = 0; 1442 1443 if (nfit_mem->memdev_pmem) 1444 formats++; 1445 if (nfit_mem->memdev_bdw) 1446 formats++; 1447 return formats; 1448 } 1449 1450 static ssize_t format_show(struct device *dev, 1451 struct device_attribute *attr, char *buf) 1452 { 1453 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1454 1455 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1456 } 1457 static DEVICE_ATTR_RO(format); 1458 1459 static ssize_t format1_show(struct device *dev, 1460 struct device_attribute *attr, char *buf) 1461 { 1462 u32 handle; 1463 ssize_t rc = -ENXIO; 1464 
struct nfit_mem *nfit_mem; 1465 struct nfit_memdev *nfit_memdev; 1466 struct acpi_nfit_desc *acpi_desc; 1467 struct nvdimm *nvdimm = to_nvdimm(dev); 1468 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1469 1470 nfit_mem = nvdimm_provider_data(nvdimm); 1471 acpi_desc = nfit_mem->acpi_desc; 1472 handle = to_nfit_memdev(dev)->device_handle; 1473 1474 /* assumes DIMMs have at most 2 published interface codes */ 1475 mutex_lock(&acpi_desc->init_mutex); 1476 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1477 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1478 struct nfit_dcr *nfit_dcr; 1479 1480 if (memdev->device_handle != handle) 1481 continue; 1482 1483 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1484 if (nfit_dcr->dcr->region_index != memdev->region_index) 1485 continue; 1486 if (nfit_dcr->dcr->code == dcr->code) 1487 continue; 1488 rc = sprintf(buf, "0x%04x\n", 1489 le16_to_cpu(nfit_dcr->dcr->code)); 1490 break; 1491 } 1492 if (rc != ENXIO) 1493 break; 1494 } 1495 mutex_unlock(&acpi_desc->init_mutex); 1496 return rc; 1497 } 1498 static DEVICE_ATTR_RO(format1); 1499 1500 static ssize_t formats_show(struct device *dev, 1501 struct device_attribute *attr, char *buf) 1502 { 1503 struct nvdimm *nvdimm = to_nvdimm(dev); 1504 1505 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1506 } 1507 static DEVICE_ATTR_RO(formats); 1508 1509 static ssize_t serial_show(struct device *dev, 1510 struct device_attribute *attr, char *buf) 1511 { 1512 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1513 1514 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1515 } 1516 static DEVICE_ATTR_RO(serial); 1517 1518 static ssize_t family_show(struct device *dev, 1519 struct device_attribute *attr, char *buf) 1520 { 1521 struct nvdimm *nvdimm = to_nvdimm(dev); 1522 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1523 1524 if (nfit_mem->family < 0) 1525 return -ENXIO; 1526 return sprintf(buf, "%d\n", nfit_mem->family); 1527 } 1528 static DEVICE_ATTR_RO(family); 1529 1530 static ssize_t dsm_mask_show(struct device *dev, 1531 struct device_attribute *attr, char *buf) 1532 { 1533 struct nvdimm *nvdimm = to_nvdimm(dev); 1534 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1535 1536 if (nfit_mem->family < 0) 1537 return -ENXIO; 1538 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); 1539 } 1540 static DEVICE_ATTR_RO(dsm_mask); 1541 1542 static ssize_t flags_show(struct device *dev, 1543 struct device_attribute *attr, char *buf) 1544 { 1545 u16 flags = to_nfit_memdev(dev)->flags; 1546 1547 return sprintf(buf, "%s%s%s%s%s%s%s\n", 1548 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 1549 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 1550 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 1551 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 1552 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", 1553 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", 1554 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? 
"smart_notify " : ""); 1555 } 1556 static DEVICE_ATTR_RO(flags); 1557 1558 static ssize_t id_show(struct device *dev, 1559 struct device_attribute *attr, char *buf) 1560 { 1561 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1562 1563 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) 1564 return sprintf(buf, "%04x-%02x-%04x-%08x\n", 1565 be16_to_cpu(dcr->vendor_id), 1566 dcr->manufacturing_location, 1567 be16_to_cpu(dcr->manufacturing_date), 1568 be32_to_cpu(dcr->serial_number)); 1569 else 1570 return sprintf(buf, "%04x-%08x\n", 1571 be16_to_cpu(dcr->vendor_id), 1572 be32_to_cpu(dcr->serial_number)); 1573 } 1574 static DEVICE_ATTR_RO(id); 1575 1576 static struct attribute *acpi_nfit_dimm_attributes[] = { 1577 &dev_attr_handle.attr, 1578 &dev_attr_phys_id.attr, 1579 &dev_attr_vendor.attr, 1580 &dev_attr_device.attr, 1581 &dev_attr_rev_id.attr, 1582 &dev_attr_subsystem_vendor.attr, 1583 &dev_attr_subsystem_device.attr, 1584 &dev_attr_subsystem_rev_id.attr, 1585 &dev_attr_format.attr, 1586 &dev_attr_formats.attr, 1587 &dev_attr_format1.attr, 1588 &dev_attr_serial.attr, 1589 &dev_attr_flags.attr, 1590 &dev_attr_id.attr, 1591 &dev_attr_family.attr, 1592 &dev_attr_dsm_mask.attr, 1593 NULL, 1594 }; 1595 1596 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 1597 struct attribute *a, int n) 1598 { 1599 struct device *dev = container_of(kobj, struct device, kobj); 1600 struct nvdimm *nvdimm = to_nvdimm(dev); 1601 1602 if (!to_nfit_dcr(dev)) { 1603 /* Without a dcr only the memdev attributes can be surfaced */ 1604 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr 1605 || a == &dev_attr_flags.attr 1606 || a == &dev_attr_family.attr 1607 || a == &dev_attr_dsm_mask.attr) 1608 return a->mode; 1609 return 0; 1610 } 1611 1612 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) 1613 return 0; 1614 return a->mode; 1615 } 1616 1617 static const struct attribute_group acpi_nfit_dimm_attribute_group = { 1618 .name = "nfit", 1619 .attrs = acpi_nfit_dimm_attributes, 1620 .is_visible = acpi_nfit_dimm_attr_visible, 1621 }; 1622 1623 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 1624 &nvdimm_attribute_group, 1625 &nd_device_attribute_group, 1626 &acpi_nfit_dimm_attribute_group, 1627 NULL, 1628 }; 1629 1630 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 1631 u32 device_handle) 1632 { 1633 struct nfit_mem *nfit_mem; 1634 1635 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1636 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 1637 return nfit_mem->nvdimm; 1638 1639 return NULL; 1640 } 1641 1642 void __acpi_nvdimm_notify(struct device *dev, u32 event) 1643 { 1644 struct nfit_mem *nfit_mem; 1645 struct acpi_nfit_desc *acpi_desc; 1646 1647 dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev), 1648 event); 1649 1650 if (event != NFIT_NOTIFY_DIMM_HEALTH) { 1651 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), 1652 event); 1653 return; 1654 } 1655 1656 acpi_desc = dev_get_drvdata(dev->parent); 1657 if (!acpi_desc) 1658 return; 1659 1660 /* 1661 * If we successfully retrieved acpi_desc, then we know nfit_mem data 1662 * is still valid. 
1663 */ 1664 nfit_mem = dev_get_drvdata(dev); 1665 if (nfit_mem && nfit_mem->flags_attr) 1666 sysfs_notify_dirent(nfit_mem->flags_attr); 1667 } 1668 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); 1669 1670 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) 1671 { 1672 struct acpi_device *adev = data; 1673 struct device *dev = &adev->dev; 1674 1675 device_lock(dev->parent); 1676 __acpi_nvdimm_notify(dev, event); 1677 device_unlock(dev->parent); 1678 } 1679 1680 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) 1681 { 1682 acpi_handle handle; 1683 acpi_status status; 1684 1685 status = acpi_get_handle(adev->handle, method, &handle); 1686 1687 if (ACPI_SUCCESS(status)) 1688 return true; 1689 return false; 1690 } 1691 1692 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1693 struct nfit_mem *nfit_mem, u32 device_handle) 1694 { 1695 struct acpi_device *adev, *adev_dimm; 1696 struct device *dev = acpi_desc->dev; 1697 unsigned long dsm_mask; 1698 const guid_t *guid; 1699 int i; 1700 int family = -1; 1701 1702 /* nfit test assumes 1:1 relationship between commands and dsms */ 1703 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; 1704 nfit_mem->family = NVDIMM_FAMILY_INTEL; 1705 adev = to_acpi_dev(acpi_desc); 1706 if (!adev) 1707 return 0; 1708 1709 adev_dimm = acpi_find_child_device(adev, device_handle, false); 1710 nfit_mem->adev = adev_dimm; 1711 if (!adev_dimm) { 1712 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 1713 device_handle); 1714 return force_enable_dimms ? 0 : -ENODEV; 1715 } 1716 1717 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, 1718 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { 1719 dev_err(dev, "%s: notification registration failed\n", 1720 dev_name(&adev_dimm->dev)); 1721 return -ENXIO; 1722 } 1723 /* 1724 * Record nfit_mem for the notification path to track back to 1725 * the nfit sysfs attributes for this dimm device object. 1726 */ 1727 dev_set_drvdata(&adev_dimm->dev, nfit_mem); 1728 1729 /* 1730 * Until standardization materializes we need to consider 4 1731 * different command sets. Note, that checking for function0 (bit0) 1732 * tells us if any commands are reachable through this GUID. 1733 */ 1734 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) 1735 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1736 if (family < 0 || i == default_dsm_family) 1737 family = i; 1738 1739 /* limit the supported commands to those that are publicly documented */ 1740 nfit_mem->family = family; 1741 if (override_dsm_mask && !disable_vendor_specific) 1742 dsm_mask = override_dsm_mask; 1743 else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1744 dsm_mask = NVDIMM_INTEL_CMDMASK; 1745 if (disable_vendor_specific) 1746 dsm_mask &= ~(1 << ND_CMD_VENDOR); 1747 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { 1748 dsm_mask = 0x1c3c76; 1749 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { 1750 dsm_mask = 0x1fe; 1751 if (disable_vendor_specific) 1752 dsm_mask &= ~(1 << 8); 1753 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { 1754 dsm_mask = 0xffffffff; 1755 } else { 1756 dev_dbg(dev, "unknown dimm command family\n"); 1757 nfit_mem->family = -1; 1758 /* DSMs are optional, continue loading the driver... 
*/ 1759 return 0; 1760 } 1761 1762 guid = to_nfit_uuid(nfit_mem->family); 1763 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1764 if (acpi_check_dsm(adev_dimm->handle, guid, 1765 nfit_dsm_revid(nfit_mem->family, i), 1766 1ULL << i)) 1767 set_bit(i, &nfit_mem->dsm_mask); 1768 1769 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1770 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1771 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1772 nfit_mem->has_lsr = true; 1773 } 1774 1775 if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { 1776 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); 1777 nfit_mem->has_lsw = true; 1778 } 1779 1780 return 0; 1781 } 1782 1783 static void shutdown_dimm_notify(void *data) 1784 { 1785 struct acpi_nfit_desc *acpi_desc = data; 1786 struct nfit_mem *nfit_mem; 1787 1788 mutex_lock(&acpi_desc->init_mutex); 1789 /* 1790 * Clear out the nfit_mem->flags_attr and shut down dimm event 1791 * notifications. 1792 */ 1793 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1794 struct acpi_device *adev_dimm = nfit_mem->adev; 1795 1796 if (nfit_mem->flags_attr) { 1797 sysfs_put(nfit_mem->flags_attr); 1798 nfit_mem->flags_attr = NULL; 1799 } 1800 if (adev_dimm) { 1801 acpi_remove_notify_handler(adev_dimm->handle, 1802 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); 1803 dev_set_drvdata(&adev_dimm->dev, NULL); 1804 } 1805 } 1806 mutex_unlock(&acpi_desc->init_mutex); 1807 } 1808 1809 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 1810 { 1811 struct nfit_mem *nfit_mem; 1812 int dimm_count = 0, rc; 1813 struct nvdimm *nvdimm; 1814 1815 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1816 struct acpi_nfit_flush_address *flush; 1817 unsigned long flags = 0, cmd_mask; 1818 struct nfit_memdev *nfit_memdev; 1819 u32 device_handle; 1820 u16 mem_flags; 1821 1822 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 1823 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 1824 if (nvdimm) { 1825 dimm_count++; 1826 continue; 1827 } 1828 1829 if (nfit_mem->bdw && nfit_mem->memdev_pmem) 1830 set_bit(NDD_ALIASING, &flags); 1831 1832 /* collate flags across all memdevs for this dimm */ 1833 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1834 struct acpi_nfit_memory_map *dimm_memdev; 1835 1836 dimm_memdev = __to_nfit_memdev(nfit_mem); 1837 if (dimm_memdev->device_handle 1838 != nfit_memdev->memdev->device_handle) 1839 continue; 1840 dimm_memdev->flags |= nfit_memdev->memdev->flags; 1841 } 1842 1843 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 1844 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 1845 set_bit(NDD_UNARMED, &flags); 1846 1847 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 1848 if (rc) 1849 continue; 1850 1851 /* 1852 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL 1853 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the 1854 * userspace interface. 1855 */ 1856 cmd_mask = 1UL << ND_CMD_CALL; 1857 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1858 /* 1859 * These commands have a 1:1 correspondence 1860 * between DSM payload and libnvdimm ioctl 1861 * payload format. 1862 */ 1863 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 1864 } 1865 1866 if (nfit_mem->has_lsr) { 1867 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 1868 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); 1869 } 1870 if (nfit_mem->has_lsw) 1871 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); 1872 1873 flush = nfit_mem->nfit_flush ? 
nfit_mem->nfit_flush->flush 1874 : NULL; 1875 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 1876 acpi_nfit_dimm_attribute_groups, 1877 flags, cmd_mask, flush ? flush->hint_count : 0, 1878 nfit_mem->flush_wpq); 1879 if (!nvdimm) 1880 return -ENOMEM; 1881 1882 nfit_mem->nvdimm = nvdimm; 1883 dimm_count++; 1884 1885 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 1886 continue; 1887 1888 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", 1889 nvdimm_name(nvdimm), 1890 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 1891 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 1892 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 1893 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", 1894 mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : ""); 1895 1896 } 1897 1898 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 1899 if (rc) 1900 return rc; 1901 1902 /* 1903 * Now that dimms are successfully registered, and async registration 1904 * is flushed, attempt to enable event notification. 1905 */ 1906 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1907 struct kernfs_node *nfit_kernfs; 1908 1909 nvdimm = nfit_mem->nvdimm; 1910 if (!nvdimm) 1911 continue; 1912 1913 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); 1914 if (nfit_kernfs) 1915 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, 1916 "flags"); 1917 sysfs_put(nfit_kernfs); 1918 if (!nfit_mem->flags_attr) 1919 dev_warn(acpi_desc->dev, "%s: notifications disabled\n", 1920 nvdimm_name(nvdimm)); 1921 } 1922 1923 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, 1924 acpi_desc); 1925 } 1926 1927 /* 1928 * These constants are private because there are no kernel consumers of 1929 * these commands. 
1930 */ 1931 enum nfit_aux_cmds { 1932 NFIT_CMD_TRANSLATE_SPA = 5, 1933 NFIT_CMD_ARS_INJECT_SET = 7, 1934 NFIT_CMD_ARS_INJECT_CLEAR = 8, 1935 NFIT_CMD_ARS_INJECT_GET = 9, 1936 }; 1937 1938 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 1939 { 1940 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1941 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); 1942 struct acpi_device *adev; 1943 unsigned long dsm_mask; 1944 int i; 1945 1946 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 1947 nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; 1948 adev = to_acpi_dev(acpi_desc); 1949 if (!adev) 1950 return; 1951 1952 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 1953 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1954 set_bit(i, &nd_desc->cmd_mask); 1955 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); 1956 1957 dsm_mask = 1958 (1 << ND_CMD_ARS_CAP) | 1959 (1 << ND_CMD_ARS_START) | 1960 (1 << ND_CMD_ARS_STATUS) | 1961 (1 << ND_CMD_CLEAR_ERROR) | 1962 (1 << NFIT_CMD_TRANSLATE_SPA) | 1963 (1 << NFIT_CMD_ARS_INJECT_SET) | 1964 (1 << NFIT_CMD_ARS_INJECT_CLEAR) | 1965 (1 << NFIT_CMD_ARS_INJECT_GET); 1966 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1967 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1968 set_bit(i, &nd_desc->bus_dsm_mask); 1969 } 1970 1971 static ssize_t range_index_show(struct device *dev, 1972 struct device_attribute *attr, char *buf) 1973 { 1974 struct nd_region *nd_region = to_nd_region(dev); 1975 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1976 1977 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 1978 } 1979 static DEVICE_ATTR_RO(range_index); 1980 1981 static ssize_t ecc_unit_size_show(struct device *dev, 1982 struct device_attribute *attr, char *buf) 1983 { 1984 struct nd_region *nd_region = to_nd_region(dev); 1985 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1986 1987 return sprintf(buf, "%d\n", nfit_spa->clear_err_unit); 1988 } 1989 static DEVICE_ATTR_RO(ecc_unit_size); 1990 1991 static struct attribute *acpi_nfit_region_attributes[] = { 1992 &dev_attr_range_index.attr, 1993 &dev_attr_ecc_unit_size.attr, 1994 NULL, 1995 }; 1996 1997 static const struct attribute_group acpi_nfit_region_attribute_group = { 1998 .name = "nfit", 1999 .attrs = acpi_nfit_region_attributes, 2000 }; 2001 2002 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 2003 &nd_region_attribute_group, 2004 &nd_mapping_attribute_group, 2005 &nd_device_attribute_group, 2006 &nd_numa_attribute_group, 2007 &acpi_nfit_region_attribute_group, 2008 NULL, 2009 }; 2010 2011 /* enough info to uniquely specify an interleave set */ 2012 struct nfit_set_info { 2013 struct nfit_set_info_map { 2014 u64 region_offset; 2015 u32 serial_number; 2016 u32 pad; 2017 } mapping[0]; 2018 }; 2019 2020 struct nfit_set_info2 { 2021 struct nfit_set_info_map2 { 2022 u64 region_offset; 2023 u32 serial_number; 2024 u16 vendor_id; 2025 u16 manufacturing_date; 2026 u8 manufacturing_location; 2027 u8 reserved[31]; 2028 } mapping[0]; 2029 }; 2030 2031 static size_t sizeof_nfit_set_info(int num_mappings) 2032 { 2033 return sizeof(struct nfit_set_info) 2034 + num_mappings * sizeof(struct nfit_set_info_map); 2035 } 2036 2037 static size_t sizeof_nfit_set_info2(int num_mappings) 2038 { 2039 return sizeof(struct nfit_set_info2) 2040 + num_mappings * sizeof(struct nfit_set_info_map2); 2041 } 2042 2043 static int cmp_map_compat(const void *m0, const void *m1) 2044 { 2045 const struct nfit_set_info_map *map0 = m0; 2046 const struct 
nfit_set_info_map *map1 = m1; 2047 2048 return memcmp(&map0->region_offset, &map1->region_offset, 2049 sizeof(u64)); 2050 } 2051 2052 static int cmp_map(const void *m0, const void *m1) 2053 { 2054 const struct nfit_set_info_map *map0 = m0; 2055 const struct nfit_set_info_map *map1 = m1; 2056 2057 if (map0->region_offset < map1->region_offset) 2058 return -1; 2059 else if (map0->region_offset > map1->region_offset) 2060 return 1; 2061 return 0; 2062 } 2063 2064 static int cmp_map2(const void *m0, const void *m1) 2065 { 2066 const struct nfit_set_info_map2 *map0 = m0; 2067 const struct nfit_set_info_map2 *map1 = m1; 2068 2069 if (map0->region_offset < map1->region_offset) 2070 return -1; 2071 else if (map0->region_offset > map1->region_offset) 2072 return 1; 2073 return 0; 2074 } 2075 2076 /* Retrieve the nth entry referencing this spa */ 2077 static struct acpi_nfit_memory_map *memdev_from_spa( 2078 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2079 { 2080 struct nfit_memdev *nfit_memdev; 2081 2082 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2083 if (nfit_memdev->memdev->range_index == range_index) 2084 if (n-- == 0) 2085 return nfit_memdev->memdev; 2086 return NULL; 2087 } 2088 2089 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2090 struct nd_region_desc *ndr_desc, 2091 struct acpi_nfit_system_address *spa) 2092 { 2093 struct device *dev = acpi_desc->dev; 2094 struct nd_interleave_set *nd_set; 2095 u16 nr = ndr_desc->num_mappings; 2096 struct nfit_set_info2 *info2; 2097 struct nfit_set_info *info; 2098 int i; 2099 2100 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2101 if (!nd_set) 2102 return -ENOMEM; 2103 ndr_desc->nd_set = nd_set; 2104 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2105 2106 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2107 if (!info) 2108 return -ENOMEM; 2109 2110 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); 2111 if (!info2) 2112 return -ENOMEM; 2113 2114 for (i = 0; i < nr; i++) { 2115 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2116 struct nfit_set_info_map *map = &info->mapping[i]; 2117 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2118 struct nvdimm *nvdimm = mapping->nvdimm; 2119 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2120 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 2121 spa->range_index, i); 2122 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2123 2124 if (!memdev || !nfit_mem->dcr) { 2125 dev_err(dev, "%s: failed to find DCR\n", __func__); 2126 return -ENODEV; 2127 } 2128 2129 map->region_offset = memdev->region_offset; 2130 map->serial_number = dcr->serial_number; 2131 2132 map2->region_offset = memdev->region_offset; 2133 map2->serial_number = dcr->serial_number; 2134 map2->vendor_id = dcr->vendor_id; 2135 map2->manufacturing_date = dcr->manufacturing_date; 2136 map2->manufacturing_location = dcr->manufacturing_location; 2137 } 2138 2139 /* v1.1 namespaces */ 2140 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2141 cmp_map, NULL); 2142 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2143 2144 /* v1.2 namespaces */ 2145 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), 2146 cmp_map2, NULL); 2147 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); 2148 2149 /* support v1.1 namespaces created with the wrong sort order */ 2150 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2151 cmp_map_compat, 
NULL); 2152 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2153 2154 /* record the result of the sort for the mapping position */ 2155 for (i = 0; i < nr; i++) { 2156 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2157 int j; 2158 2159 for (j = 0; j < nr; j++) { 2160 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2161 struct nvdimm *nvdimm = mapping->nvdimm; 2162 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2163 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2164 2165 if (map2->serial_number == dcr->serial_number && 2166 map2->vendor_id == dcr->vendor_id && 2167 map2->manufacturing_date == dcr->manufacturing_date && 2168 map2->manufacturing_location 2169 == dcr->manufacturing_location) { 2170 mapping->position = i; 2171 break; 2172 } 2173 } 2174 } 2175 2176 ndr_desc->nd_set = nd_set; 2177 devm_kfree(dev, info); 2178 devm_kfree(dev, info2); 2179 2180 return 0; 2181 } 2182 2183 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2184 { 2185 struct acpi_nfit_interleave *idt = mmio->idt; 2186 u32 sub_line_offset, line_index, line_offset; 2187 u64 line_no, table_skip_count, table_offset; 2188 2189 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2190 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2191 line_offset = idt->line_offset[line_index] 2192 * mmio->line_size; 2193 table_offset = table_skip_count * mmio->table_size; 2194 2195 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2196 } 2197 2198 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2199 { 2200 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2201 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2202 const u32 STATUS_MASK = 0x80000037; 2203 2204 if (mmio->num_lines) 2205 offset = to_interleave_offset(offset, mmio); 2206 2207 return readl(mmio->addr.base + offset) & STATUS_MASK; 2208 } 2209 2210 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2211 resource_size_t dpa, unsigned int len, unsigned int write) 2212 { 2213 u64 cmd, offset; 2214 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2215 2216 enum { 2217 BCW_OFFSET_MASK = (1ULL << 48)-1, 2218 BCW_LEN_SHIFT = 48, 2219 BCW_LEN_MASK = (1ULL << 8) - 1, 2220 BCW_CMD_SHIFT = 56, 2221 }; 2222 2223 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2224 len = len >> L1_CACHE_SHIFT; 2225 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2226 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2227 2228 offset = nfit_blk->cmd_offset + mmio->size * bw; 2229 if (mmio->num_lines) 2230 offset = to_interleave_offset(offset, mmio); 2231 2232 writeq(cmd, mmio->addr.base + offset); 2233 nvdimm_flush(nfit_blk->nd_region); 2234 2235 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2236 readq(mmio->addr.base + offset); 2237 } 2238 2239 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2240 resource_size_t dpa, void *iobuf, size_t len, int rw, 2241 unsigned int lane) 2242 { 2243 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2244 unsigned int copied = 0; 2245 u64 base_offset; 2246 int rc; 2247 2248 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2249 + lane * mmio->size; 2250 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2251 while (len) { 2252 unsigned int c; 2253 u64 offset; 2254 2255 if (mmio->num_lines) { 2256 u32 line_offset; 2257 2258 offset = to_interleave_offset(base_offset + copied, 2259 mmio); 2260 div_u64_rem(offset, mmio->line_size, &line_offset); 2261 c = min_t(size_t, len, 
mmio->line_size - line_offset); 2262 } else { 2263 offset = base_offset + nfit_blk->bdw_offset; 2264 c = len; 2265 } 2266 2267 if (rw) 2268 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2269 else { 2270 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2271 arch_invalidate_pmem((void __force *) 2272 mmio->addr.aperture + offset, c); 2273 2274 memcpy(iobuf + copied, mmio->addr.aperture + offset, c); 2275 } 2276 2277 copied += c; 2278 len -= c; 2279 } 2280 2281 if (rw) 2282 nvdimm_flush(nfit_blk->nd_region); 2283 2284 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2285 return rc; 2286 } 2287 2288 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2289 resource_size_t dpa, void *iobuf, u64 len, int rw) 2290 { 2291 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2292 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2293 struct nd_region *nd_region = nfit_blk->nd_region; 2294 unsigned int lane, copied = 0; 2295 int rc = 0; 2296 2297 lane = nd_region_acquire_lane(nd_region); 2298 while (len) { 2299 u64 c = min(len, mmio->size); 2300 2301 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2302 iobuf + copied, c, rw, lane); 2303 if (rc) 2304 break; 2305 2306 copied += c; 2307 len -= c; 2308 } 2309 nd_region_release_lane(nd_region, lane); 2310 2311 return rc; 2312 } 2313 2314 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2315 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2316 { 2317 if (idt) { 2318 mmio->num_lines = idt->line_count; 2319 mmio->line_size = idt->line_size; 2320 if (interleave_ways == 0) 2321 return -ENXIO; 2322 mmio->table_size = mmio->num_lines * interleave_ways 2323 * mmio->line_size; 2324 } 2325 2326 return 0; 2327 } 2328 2329 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2330 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2331 { 2332 struct nd_cmd_dimm_flags flags; 2333 int rc; 2334 2335 memset(&flags, 0, sizeof(flags)); 2336 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2337 sizeof(flags), NULL); 2338 2339 if (rc >= 0 && flags.status == 0) 2340 nfit_blk->dimm_flags = flags.flags; 2341 else if (rc == -ENOTTY) { 2342 /* fall back to a conservative default */ 2343 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2344 rc = 0; 2345 } else 2346 rc = -ENXIO; 2347 2348 return rc; 2349 } 2350 2351 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2352 struct device *dev) 2353 { 2354 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2355 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2356 struct nfit_blk_mmio *mmio; 2357 struct nfit_blk *nfit_blk; 2358 struct nfit_mem *nfit_mem; 2359 struct nvdimm *nvdimm; 2360 int rc; 2361 2362 nvdimm = nd_blk_region_to_dimm(ndbr); 2363 nfit_mem = nvdimm_provider_data(nvdimm); 2364 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2365 dev_dbg(dev, "missing%s%s%s\n", 2366 nfit_mem ? "" : " nfit_mem", 2367 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2368 (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); 2369 return -ENXIO; 2370 } 2371 2372 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2373 if (!nfit_blk) 2374 return -ENOMEM; 2375 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2376 nfit_blk->nd_region = to_nd_region(dev); 2377 2378 /* map block aperture memory */ 2379 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2380 mmio = &nfit_blk->mmio[BDW]; 2381 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2382 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2383 if (!mmio->addr.base) { 2384 dev_dbg(dev, "%s failed to map bdw\n", 2385 nvdimm_name(nvdimm)); 2386 return -ENOMEM; 2387 } 2388 mmio->size = nfit_mem->bdw->size; 2389 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2390 mmio->idt = nfit_mem->idt_bdw; 2391 mmio->spa = nfit_mem->spa_bdw; 2392 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2393 nfit_mem->memdev_bdw->interleave_ways); 2394 if (rc) { 2395 dev_dbg(dev, "%s failed to init bdw interleave\n", 2396 nvdimm_name(nvdimm)); 2397 return rc; 2398 } 2399 2400 /* map block control memory */ 2401 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2402 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2403 mmio = &nfit_blk->mmio[DCR]; 2404 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2405 nfit_mem->spa_dcr->length); 2406 if (!mmio->addr.base) { 2407 dev_dbg(dev, "%s failed to map dcr\n", 2408 nvdimm_name(nvdimm)); 2409 return -ENOMEM; 2410 } 2411 mmio->size = nfit_mem->dcr->window_size; 2412 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2413 mmio->idt = nfit_mem->idt_dcr; 2414 mmio->spa = nfit_mem->spa_dcr; 2415 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2416 nfit_mem->memdev_dcr->interleave_ways); 2417 if (rc) { 2418 dev_dbg(dev, "%s failed to init dcr interleave\n", 2419 nvdimm_name(nvdimm)); 2420 return rc; 2421 } 2422 2423 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2424 if (rc < 0) { 2425 dev_dbg(dev, "%s failed get DIMM flags\n", 2426 nvdimm_name(nvdimm)); 2427 return rc; 2428 } 2429 2430 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2431 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2432 2433 if (mmio->line_size == 0) 2434 return 0; 2435 2436 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2437 + 8 > mmio->line_size) { 2438 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2439 return -ENXIO; 2440 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2441 + 8 > mmio->line_size) { 2442 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2443 return -ENXIO; 2444 } 2445 2446 return 0; 2447 } 2448 2449 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2450 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2451 { 2452 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2453 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2454 int cmd_rc, rc; 2455 2456 cmd->address = spa->address; 2457 cmd->length = spa->length; 2458 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2459 sizeof(*cmd), &cmd_rc); 2460 if (rc < 0) 2461 return rc; 2462 return cmd_rc; 2463 } 2464 2465 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) 2466 { 2467 int rc; 2468 int cmd_rc; 2469 struct nd_cmd_ars_start ars_start; 2470 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2471 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2472 2473 memset(&ars_start, 0, sizeof(ars_start)); 2474 ars_start.address = spa->address; 2475 ars_start.length = spa->length; 2476 if 
(test_bit(ARS_SHORT, &nfit_spa->ars_state)) 2477 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2478 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2479 ars_start.type = ND_ARS_PERSISTENT; 2480 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2481 ars_start.type = ND_ARS_VOLATILE; 2482 else 2483 return -ENOTTY; 2484 2485 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2486 sizeof(ars_start), &cmd_rc); 2487 2488 if (rc < 0) 2489 return rc; 2490 return cmd_rc; 2491 } 2492 2493 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2494 { 2495 int rc, cmd_rc; 2496 struct nd_cmd_ars_start ars_start; 2497 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2498 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2499 2500 memset(&ars_start, 0, sizeof(ars_start)); 2501 ars_start.address = ars_status->restart_address; 2502 ars_start.length = ars_status->restart_length; 2503 ars_start.type = ars_status->type; 2504 ars_start.flags = acpi_desc->ars_start_flags; 2505 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2506 sizeof(ars_start), &cmd_rc); 2507 if (rc < 0) 2508 return rc; 2509 return cmd_rc; 2510 } 2511 2512 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2513 { 2514 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2515 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2516 int rc, cmd_rc; 2517 2518 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2519 acpi_desc->max_ars, &cmd_rc); 2520 if (rc < 0) 2521 return rc; 2522 return cmd_rc; 2523 } 2524 2525 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2526 struct nfit_spa *nfit_spa) 2527 { 2528 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2529 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2530 struct nd_region *nd_region = nfit_spa->nd_region; 2531 struct device *dev; 2532 2533 if ((ars_status->address >= spa->address && ars_status->address 2534 < spa->address + spa->length) 2535 || (ars_status->address < spa->address)) { 2536 /* 2537 * Assume that if a scrub starts at an offset from the 2538 * start of nfit_spa that we are in the continuation 2539 * case. 2540 * 2541 * Otherwise, if the scrub covers the spa range, mark 2542 * any pending request complete. 2543 */ 2544 if (ars_status->address + ars_status->length 2545 >= spa->address + spa->length) 2546 /* complete */; 2547 else 2548 return; 2549 } else 2550 return; 2551 2552 if (test_bit(ARS_DONE, &nfit_spa->ars_state)) 2553 return; 2554 2555 if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state)) 2556 return; 2557 2558 if (nd_region) { 2559 dev = nd_region_dev(nd_region); 2560 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2561 } else 2562 dev = acpi_desc->dev; 2563 2564 dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index, 2565 test_bit(ARS_SHORT, &nfit_spa->ars_state) 2566 ? "short" : "long"); 2567 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2568 set_bit(ARS_DONE, &nfit_spa->ars_state); 2569 } 2570 2571 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2572 { 2573 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2574 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2575 int rc; 2576 u32 i; 2577 2578 /* 2579 * First record starts at 44 byte offset from the start of the 2580 * payload. 
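* Only records that fit completely within out_length are added to the
* badrange list; a truncated record ends the loop and is flagged by the
* warning below.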
2581 */ 2582 if (ars_status->out_length < 44) 2583 return 0; 2584 for (i = 0; i < ars_status->num_records; i++) { 2585 /* only process full records */ 2586 if (ars_status->out_length 2587 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2588 break; 2589 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2590 ars_status->records[i].err_address, 2591 ars_status->records[i].length); 2592 if (rc) 2593 return rc; 2594 } 2595 if (i < ars_status->num_records) 2596 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2597 2598 return 0; 2599 } 2600 2601 static void acpi_nfit_remove_resource(void *data) 2602 { 2603 struct resource *res = data; 2604 2605 remove_resource(res); 2606 } 2607 2608 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2609 struct nd_region_desc *ndr_desc) 2610 { 2611 struct resource *res, *nd_res = ndr_desc->res; 2612 int is_pmem, ret; 2613 2614 /* No operation if the region is already registered as PMEM */ 2615 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2616 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2617 if (is_pmem == REGION_INTERSECTS) 2618 return 0; 2619 2620 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2621 if (!res) 2622 return -ENOMEM; 2623 2624 res->name = "Persistent Memory"; 2625 res->start = nd_res->start; 2626 res->end = nd_res->end; 2627 res->flags = IORESOURCE_MEM; 2628 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2629 2630 ret = insert_resource(&iomem_resource, res); 2631 if (ret) 2632 return ret; 2633 2634 ret = devm_add_action_or_reset(acpi_desc->dev, 2635 acpi_nfit_remove_resource, 2636 res); 2637 if (ret) 2638 return ret; 2639 2640 return 0; 2641 } 2642 2643 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2644 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2645 struct acpi_nfit_memory_map *memdev, 2646 struct nfit_spa *nfit_spa) 2647 { 2648 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2649 memdev->device_handle); 2650 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2651 struct nd_blk_region_desc *ndbr_desc; 2652 struct nfit_mem *nfit_mem; 2653 int rc; 2654 2655 if (!nvdimm) { 2656 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2657 spa->range_index, memdev->device_handle); 2658 return -ENODEV; 2659 } 2660 2661 mapping->nvdimm = nvdimm; 2662 switch (nfit_spa_type(spa)) { 2663 case NFIT_SPA_PM: 2664 case NFIT_SPA_VOLATILE: 2665 mapping->start = memdev->address; 2666 mapping->size = memdev->region_size; 2667 break; 2668 case NFIT_SPA_DCR: 2669 nfit_mem = nvdimm_provider_data(nvdimm); 2670 if (!nfit_mem || !nfit_mem->bdw) { 2671 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2672 spa->range_index, nvdimm_name(nvdimm)); 2673 break; 2674 } 2675 2676 mapping->size = nfit_mem->bdw->capacity; 2677 mapping->start = nfit_mem->bdw->start_address; 2678 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2679 ndr_desc->mapping = mapping; 2680 ndr_desc->num_mappings = 1; 2681 ndbr_desc = to_blk_region_desc(ndr_desc); 2682 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2683 ndbr_desc->do_io = acpi_desc->blk_do_io; 2684 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2685 if (rc) 2686 return rc; 2687 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2688 ndr_desc); 2689 if (!nfit_spa->nd_region) 2690 return -ENOMEM; 2691 break; 2692 } 2693 2694 return 0; 2695 } 2696 2697 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2698 { 2699 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2700 
nfit_spa_type(spa) == NFIT_SPA_VCD || 2701 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2702 nfit_spa_type(spa) == NFIT_SPA_PCD); 2703 } 2704 2705 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2706 { 2707 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2708 nfit_spa_type(spa) == NFIT_SPA_VCD || 2709 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2710 } 2711 2712 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2713 struct nfit_spa *nfit_spa) 2714 { 2715 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2716 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2717 struct nd_blk_region_desc ndbr_desc; 2718 struct nd_region_desc *ndr_desc; 2719 struct nfit_memdev *nfit_memdev; 2720 struct nvdimm_bus *nvdimm_bus; 2721 struct resource res; 2722 int count = 0, rc; 2723 2724 if (nfit_spa->nd_region) 2725 return 0; 2726 2727 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2728 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2729 return 0; 2730 } 2731 2732 memset(&res, 0, sizeof(res)); 2733 memset(&mappings, 0, sizeof(mappings)); 2734 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2735 res.start = spa->address; 2736 res.end = res.start + spa->length - 1; 2737 ndr_desc = &ndbr_desc.ndr_desc; 2738 ndr_desc->res = &res; 2739 ndr_desc->provider_data = nfit_spa; 2740 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2741 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2742 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2743 spa->proximity_domain); 2744 else 2745 ndr_desc->numa_node = NUMA_NO_NODE; 2746 2747 /* 2748 * Persistence domain bits are hierarchical, if 2749 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 2750 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 2751 */ 2752 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2753 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2754 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 2755 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2756 2757 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2758 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2759 struct nd_mapping_desc *mapping; 2760 2761 if (memdev->range_index != spa->range_index) 2762 continue; 2763 if (count >= ND_MAX_MAPPINGS) { 2764 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2765 spa->range_index, ND_MAX_MAPPINGS); 2766 return -ENXIO; 2767 } 2768 mapping = &mappings[count++]; 2769 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2770 memdev, nfit_spa); 2771 if (rc) 2772 goto out; 2773 } 2774 2775 ndr_desc->mapping = mappings; 2776 ndr_desc->num_mappings = count; 2777 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2778 if (rc) 2779 goto out; 2780 2781 nvdimm_bus = acpi_desc->nvdimm_bus; 2782 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2783 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2784 if (rc) { 2785 dev_warn(acpi_desc->dev, 2786 "failed to insert pmem resource to iomem: %d\n", 2787 rc); 2788 goto out; 2789 } 2790 2791 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2792 ndr_desc); 2793 if (!nfit_spa->nd_region) 2794 rc = -ENOMEM; 2795 } else if (nfit_spa_is_volatile(spa)) { 2796 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2797 ndr_desc); 2798 if (!nfit_spa->nd_region) 2799 rc = -ENOMEM; 2800 } else if (nfit_spa_is_virtual(spa)) { 2801 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2802 ndr_desc); 2803 if (!nfit_spa->nd_region) 2804 rc = -ENOMEM; 2805 } 2806 2807 out: 2808 if 
(rc) 2809 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2810 nfit_spa->spa->range_index); 2811 return rc; 2812 } 2813 2814 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 2815 { 2816 struct device *dev = acpi_desc->dev; 2817 struct nd_cmd_ars_status *ars_status; 2818 2819 if (acpi_desc->ars_status) { 2820 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2821 return 0; 2822 } 2823 2824 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 2825 if (!ars_status) 2826 return -ENOMEM; 2827 acpi_desc->ars_status = ars_status; 2828 return 0; 2829 } 2830 2831 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 2832 { 2833 int rc; 2834 2835 if (ars_status_alloc(acpi_desc)) 2836 return -ENOMEM; 2837 2838 rc = ars_get_status(acpi_desc); 2839 2840 if (rc < 0 && rc != -ENOSPC) 2841 return rc; 2842 2843 if (ars_status_process_records(acpi_desc)) 2844 return -ENOMEM; 2845 2846 return 0; 2847 } 2848 2849 static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, 2850 int *query_rc) 2851 { 2852 int rc = *query_rc; 2853 2854 if (no_init_ars) 2855 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2856 2857 set_bit(ARS_REQ, &nfit_spa->ars_state); 2858 set_bit(ARS_SHORT, &nfit_spa->ars_state); 2859 2860 switch (rc) { 2861 case 0: 2862 case -EAGAIN: 2863 rc = ars_start(acpi_desc, nfit_spa); 2864 if (rc == -EBUSY) { 2865 *query_rc = rc; 2866 break; 2867 } else if (rc == 0) { 2868 rc = acpi_nfit_query_poison(acpi_desc); 2869 } else { 2870 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2871 break; 2872 } 2873 if (rc == -EAGAIN) 2874 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2875 else if (rc == 0) 2876 ars_complete(acpi_desc, nfit_spa); 2877 break; 2878 case -EBUSY: 2879 case -ENOSPC: 2880 break; 2881 default: 2882 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2883 break; 2884 } 2885 2886 if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state)) 2887 set_bit(ARS_REQ, &nfit_spa->ars_state); 2888 2889 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2890 } 2891 2892 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 2893 { 2894 struct nfit_spa *nfit_spa; 2895 2896 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2897 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2898 continue; 2899 ars_complete(acpi_desc, nfit_spa); 2900 } 2901 } 2902 2903 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 2904 int query_rc) 2905 { 2906 unsigned int tmo = acpi_desc->scrub_tmo; 2907 struct device *dev = acpi_desc->dev; 2908 struct nfit_spa *nfit_spa; 2909 2910 if (acpi_desc->cancel) 2911 return 0; 2912 2913 if (query_rc == -EBUSY) { 2914 dev_dbg(dev, "ARS: ARS busy\n"); 2915 return min(30U * 60U, tmo * 2); 2916 } 2917 if (query_rc == -ENOSPC) { 2918 dev_dbg(dev, "ARS: ARS continue\n"); 2919 ars_continue(acpi_desc); 2920 return 1; 2921 } 2922 if (query_rc && query_rc != -EAGAIN) { 2923 unsigned long long addr, end; 2924 2925 addr = acpi_desc->ars_status->address; 2926 end = addr + acpi_desc->ars_status->length; 2927 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 2928 query_rc); 2929 } 2930 2931 ars_complete_all(acpi_desc); 2932 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2933 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2934 continue; 2935 if (test_bit(ARS_REQ, &nfit_spa->ars_state)) { 2936 int rc = ars_start(acpi_desc, nfit_spa); 2937 2938 clear_bit(ARS_DONE, &nfit_spa->ars_state); 2939 dev = nd_region_dev(nfit_spa->nd_region); 2940 dev_dbg(dev, "ARS: range %d ARS start (%d)\n", 
2941 nfit_spa->spa->range_index, rc); 2942 if (rc == 0 || rc == -EBUSY) 2943 return 1; 2944 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 2945 nfit_spa->spa->range_index, rc); 2946 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2947 } 2948 } 2949 return 0; 2950 } 2951 2952 static void acpi_nfit_scrub(struct work_struct *work) 2953 { 2954 struct acpi_nfit_desc *acpi_desc; 2955 unsigned int tmo; 2956 int query_rc; 2957 2958 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 2959 mutex_lock(&acpi_desc->init_mutex); 2960 query_rc = acpi_nfit_query_poison(acpi_desc); 2961 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 2962 if (tmo) { 2963 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 2964 acpi_desc->scrub_tmo = tmo; 2965 } else { 2966 acpi_desc->scrub_count++; 2967 if (acpi_desc->scrub_count_state) 2968 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2969 } 2970 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2971 mutex_unlock(&acpi_desc->init_mutex); 2972 } 2973 2974 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 2975 struct nfit_spa *nfit_spa) 2976 { 2977 int type = nfit_spa_type(nfit_spa->spa); 2978 struct nd_cmd_ars_cap ars_cap; 2979 int rc; 2980 2981 memset(&ars_cap, 0, sizeof(ars_cap)); 2982 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 2983 if (rc < 0) 2984 return; 2985 /* check that the supported scrub types match the spa type */ 2986 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 2987 & ND_ARS_VOLATILE) == 0) 2988 return; 2989 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 2990 & ND_ARS_PERSISTENT) == 0) 2991 return; 2992 2993 nfit_spa->max_ars = ars_cap.max_ars_out; 2994 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 2995 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 2996 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 2997 set_bit(ARS_REQ, &nfit_spa->ars_state); 2998 } 2999 3000 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 3001 { 3002 struct nfit_spa *nfit_spa; 3003 int rc, query_rc; 3004 3005 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3006 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3007 switch (nfit_spa_type(nfit_spa->spa)) { 3008 case NFIT_SPA_VOLATILE: 3009 case NFIT_SPA_PM: 3010 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3011 break; 3012 } 3013 } 3014 3015 /* 3016 * Reap any results that might be pending before starting new 3017 * short requests. 
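* acpi_nfit_query_poison() below drains any completed status payload into
* the badrange tracking so that ars_register() starts from a known state.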
3018 */ 3019 query_rc = acpi_nfit_query_poison(acpi_desc); 3020 if (query_rc == 0) 3021 ars_complete_all(acpi_desc); 3022 3023 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3024 switch (nfit_spa_type(nfit_spa->spa)) { 3025 case NFIT_SPA_VOLATILE: 3026 case NFIT_SPA_PM: 3027 /* register regions and kick off initial ARS run */ 3028 rc = ars_register(acpi_desc, nfit_spa, &query_rc); 3029 if (rc) 3030 return rc; 3031 break; 3032 case NFIT_SPA_BDW: 3033 /* nothing to register */ 3034 break; 3035 case NFIT_SPA_DCR: 3036 case NFIT_SPA_VDISK: 3037 case NFIT_SPA_VCD: 3038 case NFIT_SPA_PDISK: 3039 case NFIT_SPA_PCD: 3040 /* register known regions that don't support ARS */ 3041 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3042 if (rc) 3043 return rc; 3044 break; 3045 default: 3046 /* don't register unknown regions */ 3047 break; 3048 } 3049 3050 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); 3051 return 0; 3052 } 3053 3054 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3055 struct nfit_table_prev *prev) 3056 { 3057 struct device *dev = acpi_desc->dev; 3058 3059 if (!list_empty(&prev->spas) || 3060 !list_empty(&prev->memdevs) || 3061 !list_empty(&prev->dcrs) || 3062 !list_empty(&prev->bdws) || 3063 !list_empty(&prev->idts) || 3064 !list_empty(&prev->flushes)) { 3065 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3066 return -ENXIO; 3067 } 3068 return 0; 3069 } 3070 3071 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3072 { 3073 struct device *dev = acpi_desc->dev; 3074 struct kernfs_node *nfit; 3075 struct device *bus_dev; 3076 3077 if (!ars_supported(acpi_desc->nvdimm_bus)) 3078 return 0; 3079 3080 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3081 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3082 if (!nfit) { 3083 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3084 return -ENODEV; 3085 } 3086 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3087 sysfs_put(nfit); 3088 if (!acpi_desc->scrub_count_state) { 3089 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3090 return -ENODEV; 3091 } 3092 3093 return 0; 3094 } 3095 3096 static void acpi_nfit_unregister(void *data) 3097 { 3098 struct acpi_nfit_desc *acpi_desc = data; 3099 3100 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3101 } 3102 3103 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3104 { 3105 struct device *dev = acpi_desc->dev; 3106 struct nfit_table_prev prev; 3107 const void *end; 3108 int rc; 3109 3110 if (!acpi_desc->nvdimm_bus) { 3111 acpi_nfit_init_dsms(acpi_desc); 3112 3113 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3114 &acpi_desc->nd_desc); 3115 if (!acpi_desc->nvdimm_bus) 3116 return -ENOMEM; 3117 3118 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3119 acpi_desc); 3120 if (rc) 3121 return rc; 3122 3123 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3124 if (rc) 3125 return rc; 3126 3127 /* register this acpi_desc for mce notifications */ 3128 mutex_lock(&acpi_desc_lock); 3129 list_add_tail(&acpi_desc->list, &acpi_descs); 3130 mutex_unlock(&acpi_desc_lock); 3131 } 3132 3133 mutex_lock(&acpi_desc->init_mutex); 3134 3135 INIT_LIST_HEAD(&prev.spas); 3136 INIT_LIST_HEAD(&prev.memdevs); 3137 INIT_LIST_HEAD(&prev.dcrs); 3138 INIT_LIST_HEAD(&prev.bdws); 3139 INIT_LIST_HEAD(&prev.idts); 3140 INIT_LIST_HEAD(&prev.flushes); 3141 3142 list_cut_position(&prev.spas, &acpi_desc->spas, 3143 acpi_desc->spas.prev); 3144 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3145 
acpi_desc->memdevs.prev); 3146 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 3147 acpi_desc->dcrs.prev); 3148 list_cut_position(&prev.bdws, &acpi_desc->bdws, 3149 acpi_desc->bdws.prev); 3150 list_cut_position(&prev.idts, &acpi_desc->idts, 3151 acpi_desc->idts.prev); 3152 list_cut_position(&prev.flushes, &acpi_desc->flushes, 3153 acpi_desc->flushes.prev); 3154 3155 end = data + sz; 3156 while (!IS_ERR_OR_NULL(data)) 3157 data = add_table(acpi_desc, &prev, data, end); 3158 3159 if (IS_ERR(data)) { 3160 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data)); 3161 rc = PTR_ERR(data); 3162 goto out_unlock; 3163 } 3164 3165 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 3166 if (rc) 3167 goto out_unlock; 3168 3169 rc = nfit_mem_init(acpi_desc); 3170 if (rc) 3171 goto out_unlock; 3172 3173 rc = acpi_nfit_register_dimms(acpi_desc); 3174 if (rc) 3175 goto out_unlock; 3176 3177 rc = acpi_nfit_register_regions(acpi_desc); 3178 3179 out_unlock: 3180 mutex_unlock(&acpi_desc->init_mutex); 3181 return rc; 3182 } 3183 EXPORT_SYMBOL_GPL(acpi_nfit_init); 3184 3185 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3186 { 3187 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3188 struct device *dev = acpi_desc->dev; 3189 3190 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3191 device_lock(dev); 3192 device_unlock(dev); 3193 3194 /* Bounce the init_mutex to complete initial registration */ 3195 mutex_lock(&acpi_desc->init_mutex); 3196 mutex_unlock(&acpi_desc->init_mutex); 3197 3198 return 0; 3199 } 3200 3201 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3202 struct nvdimm *nvdimm, unsigned int cmd) 3203 { 3204 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3205 3206 if (nvdimm) 3207 return 0; 3208 if (cmd != ND_CMD_ARS_START) 3209 return 0; 3210 3211 /* 3212 * The kernel and userspace may race to initiate a scrub, but 3213 * the scrub thread is prepared to lose that initial race. It 3214 * just needs guarantees that any ars it initiates are not 3215 * interrupted by any intervening start requests from userspace.
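* Returning -EBUSY while the scrub work is queued or running lets
* userspace retry later rather than cutting short an ARS run the kernel
* already has in flight.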
3216 */ 3217 if (work_busy(&acpi_desc->dwork.work)) 3218 return -EBUSY; 3219 3220 return 0; 3221 } 3222 3223 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) 3224 { 3225 struct device *dev = acpi_desc->dev; 3226 int scheduled = 0, busy = 0; 3227 struct nfit_spa *nfit_spa; 3228 3229 mutex_lock(&acpi_desc->init_mutex); 3230 if (acpi_desc->cancel) { 3231 mutex_unlock(&acpi_desc->init_mutex); 3232 return 0; 3233 } 3234 3235 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3236 int type = nfit_spa_type(nfit_spa->spa); 3237 3238 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3239 continue; 3240 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3241 continue; 3242 3243 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) 3244 busy++; 3245 else { 3246 if (test_bit(ARS_SHORT, &flags)) 3247 set_bit(ARS_SHORT, &nfit_spa->ars_state); 3248 scheduled++; 3249 } 3250 } 3251 if (scheduled) { 3252 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); 3253 dev_dbg(dev, "ars_scan triggered\n"); 3254 } 3255 mutex_unlock(&acpi_desc->init_mutex); 3256 3257 if (scheduled) 3258 return 0; 3259 if (busy) 3260 return -EBUSY; 3261 return -ENOTTY; 3262 } 3263 3264 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3265 { 3266 struct nvdimm_bus_descriptor *nd_desc; 3267 3268 dev_set_drvdata(dev, acpi_desc); 3269 acpi_desc->dev = dev; 3270 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3271 nd_desc = &acpi_desc->nd_desc; 3272 nd_desc->provider_name = "ACPI.NFIT"; 3273 nd_desc->module = THIS_MODULE; 3274 nd_desc->ndctl = acpi_nfit_ctl; 3275 nd_desc->flush_probe = acpi_nfit_flush_probe; 3276 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3277 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3278 3279 INIT_LIST_HEAD(&acpi_desc->spas); 3280 INIT_LIST_HEAD(&acpi_desc->dcrs); 3281 INIT_LIST_HEAD(&acpi_desc->bdws); 3282 INIT_LIST_HEAD(&acpi_desc->idts); 3283 INIT_LIST_HEAD(&acpi_desc->flushes); 3284 INIT_LIST_HEAD(&acpi_desc->memdevs); 3285 INIT_LIST_HEAD(&acpi_desc->dimms); 3286 INIT_LIST_HEAD(&acpi_desc->list); 3287 mutex_init(&acpi_desc->init_mutex); 3288 acpi_desc->scrub_tmo = 1; 3289 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3290 } 3291 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3292 3293 static void acpi_nfit_put_table(void *table) 3294 { 3295 acpi_put_table(table); 3296 } 3297 3298 void acpi_nfit_shutdown(void *data) 3299 { 3300 struct acpi_nfit_desc *acpi_desc = data; 3301 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3302 3303 /* 3304 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3305 * race teardown 3306 */ 3307 mutex_lock(&acpi_desc_lock); 3308 list_del(&acpi_desc->list); 3309 mutex_unlock(&acpi_desc_lock); 3310 3311 mutex_lock(&acpi_desc->init_mutex); 3312 acpi_desc->cancel = 1; 3313 cancel_delayed_work_sync(&acpi_desc->dwork); 3314 mutex_unlock(&acpi_desc->init_mutex); 3315 3316 /* 3317 * Bounce the nvdimm bus lock to make sure any in-flight 3318 * acpi_nfit_ars_rescan() submissions have had a chance to 3319 * either submit or see ->cancel set. 
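* Any rescan that did get queued before ->cancel was observed is then
* drained by the flush_workqueue() call below.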
3320 */ 3321 device_lock(bus_dev); 3322 device_unlock(bus_dev); 3323 3324 flush_workqueue(nfit_wq); 3325 } 3326 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3327 3328 static int acpi_nfit_add(struct acpi_device *adev) 3329 { 3330 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3331 struct acpi_nfit_desc *acpi_desc; 3332 struct device *dev = &adev->dev; 3333 struct acpi_table_header *tbl; 3334 acpi_status status = AE_OK; 3335 acpi_size sz; 3336 int rc = 0; 3337 3338 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3339 if (ACPI_FAILURE(status)) { 3340 /* This is ok, we could have an nvdimm hotplugged later */ 3341 dev_dbg(dev, "failed to find NFIT at startup\n"); 3342 return 0; 3343 } 3344 3345 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3346 if (rc) 3347 return rc; 3348 sz = tbl->length; 3349 3350 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3351 if (!acpi_desc) 3352 return -ENOMEM; 3353 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3354 3355 /* Save the acpi header for exporting the revision via sysfs */ 3356 acpi_desc->acpi_header = *tbl; 3357 3358 /* Evaluate _FIT and override with that if present */ 3359 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3360 if (ACPI_SUCCESS(status) && buf.length > 0) { 3361 union acpi_object *obj = buf.pointer; 3362 3363 if (obj->type == ACPI_TYPE_BUFFER) 3364 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3365 obj->buffer.length); 3366 else 3367 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3368 (int) obj->type); 3369 kfree(buf.pointer); 3370 } else 3371 /* skip over the lead-in header table */ 3372 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3373 + sizeof(struct acpi_table_nfit), 3374 sz - sizeof(struct acpi_table_nfit)); 3375 3376 if (rc) 3377 return rc; 3378 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3379 } 3380 3381 static int acpi_nfit_remove(struct acpi_device *adev) 3382 { 3383 /* see acpi_nfit_unregister */ 3384 return 0; 3385 } 3386 3387 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3388 { 3389 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3390 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3391 union acpi_object *obj; 3392 acpi_status status; 3393 int ret; 3394 3395 if (!dev->driver) { 3396 /* dev->driver may be null if we're being removed */ 3397 dev_dbg(dev, "no driver found for dev\n"); 3398 return; 3399 } 3400 3401 if (!acpi_desc) { 3402 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3403 if (!acpi_desc) 3404 return; 3405 acpi_nfit_desc_init(acpi_desc, dev); 3406 } else { 3407 /* 3408 * Finish previous registration before considering new 3409 * regions. 3410 */ 3411 flush_workqueue(nfit_wq); 3412 } 3413 3414 /* Evaluate _FIT */ 3415 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3416 if (ACPI_FAILURE(status)) { 3417 dev_err(dev, "failed to evaluate _FIT\n"); 3418 return; 3419 } 3420 3421 obj = buf.pointer; 3422 if (obj->type == ACPI_TYPE_BUFFER) { 3423 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3424 obj->buffer.length); 3425 if (ret) 3426 dev_err(dev, "failed to merge updated NFIT\n"); 3427 } else 3428 dev_err(dev, "Invalid _FIT\n"); 3429 kfree(buf.pointer); 3430 } 3431 3432 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3433 { 3434 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3435 unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? 
3436 0 : 1 << ARS_SHORT; 3437 3438 acpi_nfit_ars_rescan(acpi_desc, flags); 3439 } 3440 3441 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3442 { 3443 dev_dbg(dev, "event: 0x%x\n", event); 3444 3445 switch (event) { 3446 case NFIT_NOTIFY_UPDATE: 3447 return acpi_nfit_update_notify(dev, handle); 3448 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3449 return acpi_nfit_uc_error_notify(dev, handle); 3450 default: 3451 return; 3452 } 3453 } 3454 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3455 3456 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3457 { 3458 device_lock(&adev->dev); 3459 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3460 device_unlock(&adev->dev); 3461 } 3462 3463 static const struct acpi_device_id acpi_nfit_ids[] = { 3464 { "ACPI0012", 0 }, 3465 { "", 0 }, 3466 }; 3467 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3468 3469 static struct acpi_driver acpi_nfit_driver = { 3470 .name = KBUILD_MODNAME, 3471 .ids = acpi_nfit_ids, 3472 .ops = { 3473 .add = acpi_nfit_add, 3474 .remove = acpi_nfit_remove, 3475 .notify = acpi_nfit_notify, 3476 }, 3477 }; 3478 3479 static __init int nfit_init(void) 3480 { 3481 int ret; 3482 3483 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3484 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3485 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3486 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3487 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3488 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3489 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3490 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3491 3492 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3493 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3494 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3495 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3496 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3497 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3498 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3499 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3500 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3501 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3502 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3503 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3504 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3505 3506 nfit_wq = create_singlethread_workqueue("nfit"); 3507 if (!nfit_wq) 3508 return -ENOMEM; 3509 3510 nfit_mce_register(); 3511 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3512 if (ret) { 3513 nfit_mce_unregister(); 3514 destroy_workqueue(nfit_wq); 3515 } 3516 3517 return ret; 3518 3519 } 3520 3521 static __exit void nfit_exit(void) 3522 { 3523 nfit_mce_unregister(); 3524 acpi_bus_unregister_driver(&acpi_nfit_driver); 3525 destroy_workqueue(nfit_wq); 3526 WARN_ON(!list_empty(&acpi_descs)); 3527 } 3528 3529 module_init(nfit_init); 3530 module_exit(nfit_exit); 3531 MODULE_LICENSE("GPL v2"); 3532 MODULE_AUTHOR("Intel Corporation"); 3533