1 /* 2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/list_sort.h> 14 #include <linux/libnvdimm.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/ndctl.h> 18 #include <linux/sysfs.h> 19 #include <linux/delay.h> 20 #include <linux/list.h> 21 #include <linux/acpi.h> 22 #include <linux/sort.h> 23 #include <linux/pmem.h> 24 #include <linux/io.h> 25 #include <linux/nd.h> 26 #include <asm/cacheflush.h> 27 #include "nfit.h" 28 29 /* 30 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 31 * irrelevant. 32 */ 33 #include <linux/io-64-nonatomic-hi-lo.h> 34 35 static bool force_enable_dimms; 36 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 37 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 38 39 static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT; 40 module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR); 41 MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds"); 42 43 /* after three payloads of overflow, it's dead jim */ 44 static unsigned int scrub_overflow_abort = 3; 45 module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR); 46 MODULE_PARM_DESC(scrub_overflow_abort, 47 "Number of times we overflow ARS results before abort"); 48 49 static bool disable_vendor_specific; 50 module_param(disable_vendor_specific, bool, S_IRUGO); 51 MODULE_PARM_DESC(disable_vendor_specific, 52 "Limit commands to the publicly specified set\n"); 53 54 LIST_HEAD(acpi_descs); 55 DEFINE_MUTEX(acpi_desc_lock); 56 57 static struct workqueue_struct *nfit_wq; 58 59 struct nfit_table_prev { 60 struct list_head spas; 61 struct list_head memdevs; 62 struct list_head dcrs; 63 struct list_head bdws; 64 struct list_head idts; 65 struct list_head flushes; 66 }; 67 68 static u8 nfit_uuid[NFIT_UUID_MAX][16]; 69 70 const u8 *to_nfit_uuid(enum nfit_uuids id) 71 { 72 return nfit_uuid[id]; 73 } 74 EXPORT_SYMBOL(to_nfit_uuid); 75 76 static struct acpi_nfit_desc *to_acpi_nfit_desc( 77 struct nvdimm_bus_descriptor *nd_desc) 78 { 79 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 80 } 81 82 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 83 { 84 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 85 86 /* 87 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct 88 * acpi_device. 89 */ 90 if (!nd_desc->provider_name 91 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) 92 return NULL; 93 94 return to_acpi_device(acpi_desc->dev); 95 } 96 97 static int xlat_status(void *buf, unsigned int cmd) 98 { 99 struct nd_cmd_clear_error *clear_err; 100 struct nd_cmd_ars_status *ars_status; 101 struct nd_cmd_ars_start *ars_start; 102 struct nd_cmd_ars_cap *ars_cap; 103 u16 flags; 104 105 switch (cmd) { 106 case ND_CMD_ARS_CAP: 107 ars_cap = buf; 108 if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE) 109 return -ENOTTY; 110 111 /* Command failed */ 112 if (ars_cap->status & 0xffff) 113 return -EIO; 114 115 /* No supported scan types for this range */ 116 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; 117 if ((ars_cap->status >> 16 & flags) == 0) 118 return -ENOTTY; 119 break; 120 case ND_CMD_ARS_START: 121 ars_start = buf; 122 /* ARS is in progress */ 123 if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY) 124 return -EBUSY; 125 126 /* Command failed */ 127 if (ars_start->status & 0xffff) 128 return -EIO; 129 break; 130 case ND_CMD_ARS_STATUS: 131 ars_status = buf; 132 /* Command failed */ 133 if (ars_status->status & 0xffff) 134 return -EIO; 135 /* Check extended status (Upper two bytes) */ 136 if (ars_status->status == NFIT_ARS_STATUS_DONE) 137 return 0; 138 139 /* ARS is in progress */ 140 if (ars_status->status == NFIT_ARS_STATUS_BUSY) 141 return -EBUSY; 142 143 /* No ARS performed for the current boot */ 144 if (ars_status->status == NFIT_ARS_STATUS_NONE) 145 return -EAGAIN; 146 147 /* 148 * ARS interrupted, either we overflowed or some other 149 * agent wants the scan to stop. If we didn't overflow 150 * then just continue with the returned results. 151 */ 152 if (ars_status->status == NFIT_ARS_STATUS_INTR) { 153 if (ars_status->flags & NFIT_ARS_F_OVERFLOW) 154 return -ENOSPC; 155 return 0; 156 } 157 158 /* Unknown status */ 159 if (ars_status->status >> 16) 160 return -EIO; 161 break; 162 case ND_CMD_CLEAR_ERROR: 163 clear_err = buf; 164 if (clear_err->status & 0xffff) 165 return -EIO; 166 if (!clear_err->cleared) 167 return -EIO; 168 if (clear_err->length > clear_err->cleared) 169 return clear_err->cleared; 170 break; 171 default: 172 break; 173 } 174 175 return 0; 176 } 177 178 static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, 179 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 180 unsigned int buf_len, int *cmd_rc) 181 { 182 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 183 union acpi_object in_obj, in_buf, *out_obj; 184 const struct nd_cmd_desc *desc = NULL; 185 struct device *dev = acpi_desc->dev; 186 struct nd_cmd_pkg *call_pkg = NULL; 187 const char *cmd_name, *dimm_name; 188 unsigned long cmd_mask, dsm_mask; 189 acpi_handle handle; 190 unsigned int func; 191 const u8 *uuid; 192 u32 offset; 193 int rc, i; 194 195 func = cmd; 196 if (cmd == ND_CMD_CALL) { 197 call_pkg = buf; 198 func = call_pkg->nd_command; 199 } 200 201 if (nvdimm) { 202 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 203 struct acpi_device *adev = nfit_mem->adev; 204 205 if (!adev) 206 return -ENOTTY; 207 if (call_pkg && nfit_mem->family != call_pkg->nd_family) 208 return -ENOTTY; 209 210 dimm_name = nvdimm_name(nvdimm); 211 cmd_name = nvdimm_cmd_name(cmd); 212 cmd_mask = nvdimm_cmd_mask(nvdimm); 213 dsm_mask = nfit_mem->dsm_mask; 214 desc = nd_cmd_dimm_desc(cmd); 215 uuid = to_nfit_uuid(nfit_mem->family); 216 handle = adev->handle; 217 } else { 218 struct acpi_device *adev = to_acpi_dev(acpi_desc); 219 220 cmd_name = nvdimm_bus_cmd_name(cmd); 221 cmd_mask = nd_desc->cmd_mask; 222 dsm_mask = cmd_mask; 223 desc = nd_cmd_bus_desc(cmd); 224 uuid = to_nfit_uuid(NFIT_DEV_BUS); 225 handle = adev->handle; 226 dimm_name = "bus"; 227 } 228 229 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 230 return -ENOTTY; 231 232 if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) 233 return -ENOTTY; 234 235 in_obj.type = ACPI_TYPE_PACKAGE; 236 in_obj.package.count = 1; 237 in_obj.package.elements = &in_buf; 238 in_buf.type = ACPI_TYPE_BUFFER; 239 in_buf.buffer.pointer = buf; 240 in_buf.buffer.length = 0; 241 242 /* libnvdimm has already validated the input envelope */ 243 for (i = 0; i < desc->in_num; i++) 244 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, 245 i, buf); 246 247 if (call_pkg) { 248 /* skip over package wrapper */ 249 in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; 250 in_buf.buffer.length = call_pkg->nd_size_in; 251 } 252 253 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { 254 dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", 255 __func__, dimm_name, cmd, func, 256 in_buf.buffer.length); 257 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, 258 in_buf.buffer.pointer, 259 min_t(u32, 256, in_buf.buffer.length), true); 260 } 261 262 out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj); 263 if (!out_obj) { 264 dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, 265 cmd_name); 266 return -EINVAL; 267 } 268 269 if (call_pkg) { 270 call_pkg->nd_fw_size = out_obj->buffer.length; 271 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, 272 out_obj->buffer.pointer, 273 min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); 274 275 ACPI_FREE(out_obj); 276 /* 277 * Need to support FW function w/o known size in advance. 278 * Caller can determine required size based upon nd_fw_size. 279 * If we return an error (like elsewhere) then caller wouldn't 280 * be able to rely upon data returned to make calculation. 281 */ 282 return 0; 283 } 284 285 if (out_obj->package.type != ACPI_TYPE_BUFFER) { 286 dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", 287 __func__, dimm_name, cmd_name, out_obj->type); 288 rc = -EINVAL; 289 goto out; 290 } 291 292 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { 293 dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, 294 dimm_name, cmd_name, out_obj->buffer.length); 295 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 296 4, out_obj->buffer.pointer, min_t(u32, 128, 297 out_obj->buffer.length), true); 298 } 299 300 for (i = 0, offset = 0; i < desc->out_num; i++) { 301 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, 302 (u32 *) out_obj->buffer.pointer); 303 304 if (offset + out_size > out_obj->buffer.length) { 305 dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", 306 __func__, dimm_name, cmd_name, i); 307 break; 308 } 309 310 if (in_buf.buffer.length + offset + out_size > buf_len) { 311 dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", 312 __func__, dimm_name, cmd_name, i); 313 rc = -ENXIO; 314 goto out; 315 } 316 memcpy(buf + in_buf.buffer.length + offset, 317 out_obj->buffer.pointer + offset, out_size); 318 offset += out_size; 319 } 320 if (offset + in_buf.buffer.length < buf_len) { 321 if (i >= 1) { 322 /* 323 * status valid, return the number of bytes left 324 * unfilled in the output buffer 325 */ 326 rc = buf_len - offset - in_buf.buffer.length; 327 if (cmd_rc) 328 *cmd_rc = xlat_status(buf, cmd); 329 } else { 330 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 331 __func__, dimm_name, cmd_name, buf_len, 332 offset); 333 rc = -ENXIO; 334 } 335 } else { 336 rc = 0; 337 if (cmd_rc) 338 *cmd_rc = xlat_status(buf, cmd); 339 } 340 341 out: 342 ACPI_FREE(out_obj); 343 344 return rc; 345 } 346 347 static const char *spa_type_name(u16 type) 348 { 349 static const char *to_name[] = { 350 [NFIT_SPA_VOLATILE] = "volatile", 351 [NFIT_SPA_PM] = "pmem", 352 [NFIT_SPA_DCR] = "dimm-control-region", 353 [NFIT_SPA_BDW] = "block-data-window", 354 [NFIT_SPA_VDISK] = "volatile-disk", 355 [NFIT_SPA_VCD] = "volatile-cd", 356 [NFIT_SPA_PDISK] = "persistent-disk", 357 [NFIT_SPA_PCD] = "persistent-cd", 358 359 }; 360 361 if (type > NFIT_SPA_PCD) 362 return "unknown"; 363 364 return to_name[type]; 365 } 366 367 int nfit_spa_type(struct acpi_nfit_system_address *spa) 368 { 369 int i; 370 371 for (i = 0; i < NFIT_UUID_MAX; i++) 372 if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) 373 return i; 374 return -1; 375 } 376 377 static bool add_spa(struct acpi_nfit_desc *acpi_desc, 378 struct nfit_table_prev *prev, 379 struct acpi_nfit_system_address *spa) 380 { 381 struct device *dev = acpi_desc->dev; 382 struct nfit_spa *nfit_spa; 383 384 if (spa->header.length != sizeof(*spa)) 385 return false; 386 387 list_for_each_entry(nfit_spa, &prev->spas, list) { 388 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { 389 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 390 return true; 391 } 392 } 393 394 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), 395 GFP_KERNEL); 396 if (!nfit_spa) 397 return false; 398 INIT_LIST_HEAD(&nfit_spa->list); 399 memcpy(nfit_spa->spa, spa, sizeof(*spa)); 400 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 401 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, 402 spa->range_index, 403 spa_type_name(nfit_spa_type(spa))); 404 return true; 405 } 406 407 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, 408 struct nfit_table_prev *prev, 409 struct acpi_nfit_memory_map *memdev) 410 { 411 struct device *dev = acpi_desc->dev; 412 struct nfit_memdev *nfit_memdev; 413 414 if (memdev->header.length != sizeof(*memdev)) 415 return false; 416 417 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 418 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 419 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 420 return true; 421 } 422 423 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), 424 GFP_KERNEL); 425 if (!nfit_memdev) 426 return false; 427 INIT_LIST_HEAD(&nfit_memdev->list); 428 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); 429 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 430 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", 431 __func__, memdev->device_handle, memdev->range_index, 432 memdev->region_index); 433 return true; 434 } 435 436 /* 437 * An implementation may provide a truncated control region if no block windows 438 * are defined. 439 */ 440 static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) 441 { 442 if (dcr->header.length < offsetof(struct acpi_nfit_control_region, 443 window_size)) 444 return 0; 445 if (dcr->windows) 446 return sizeof(*dcr); 447 return offsetof(struct acpi_nfit_control_region, window_size); 448 } 449 450 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 451 struct nfit_table_prev *prev, 452 struct acpi_nfit_control_region *dcr) 453 { 454 struct device *dev = acpi_desc->dev; 455 struct nfit_dcr *nfit_dcr; 456 457 if (!sizeof_dcr(dcr)) 458 return false; 459 460 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 461 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { 462 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 463 return true; 464 } 465 466 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), 467 GFP_KERNEL); 468 if (!nfit_dcr) 469 return false; 470 INIT_LIST_HEAD(&nfit_dcr->list); 471 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); 472 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 473 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, 474 dcr->region_index, dcr->windows); 475 return true; 476 } 477 478 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, 479 struct nfit_table_prev *prev, 480 struct acpi_nfit_data_region *bdw) 481 { 482 struct device *dev = acpi_desc->dev; 483 struct nfit_bdw *nfit_bdw; 484 485 if (bdw->header.length != sizeof(*bdw)) 486 return false; 487 list_for_each_entry(nfit_bdw, &prev->bdws, list) 488 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 489 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 490 return true; 491 } 492 493 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), 494 GFP_KERNEL); 495 if (!nfit_bdw) 496 return false; 497 INIT_LIST_HEAD(&nfit_bdw->list); 498 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); 499 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 500 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, 501 bdw->region_index, bdw->windows); 502 return true; 503 } 504 505 static size_t sizeof_idt(struct acpi_nfit_interleave *idt) 506 { 507 if (idt->header.length < sizeof(*idt)) 508 return 0; 509 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); 510 } 511 512 static bool add_idt(struct acpi_nfit_desc *acpi_desc, 513 struct nfit_table_prev *prev, 514 struct acpi_nfit_interleave *idt) 515 { 516 struct device *dev = acpi_desc->dev; 517 struct nfit_idt *nfit_idt; 518 519 if (!sizeof_idt(idt)) 520 return false; 521 522 list_for_each_entry(nfit_idt, &prev->idts, list) { 523 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) 524 continue; 525 526 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { 527 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 528 return true; 529 } 530 } 531 532 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), 533 GFP_KERNEL); 534 if (!nfit_idt) 535 return false; 536 INIT_LIST_HEAD(&nfit_idt->list); 537 memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); 538 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 539 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, 540 idt->interleave_index, idt->line_count); 541 return true; 542 } 543 544 static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) 545 { 546 if (flush->header.length < sizeof(*flush)) 547 return 0; 548 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); 549 } 550 551 static bool add_flush(struct acpi_nfit_desc *acpi_desc, 552 struct nfit_table_prev *prev, 553 struct acpi_nfit_flush_address *flush) 554 { 555 struct device *dev = acpi_desc->dev; 556 struct nfit_flush *nfit_flush; 557 558 if (!sizeof_flush(flush)) 559 return false; 560 561 list_for_each_entry(nfit_flush, &prev->flushes, list) { 562 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) 563 continue; 564 565 if (memcmp(nfit_flush->flush, flush, 566 sizeof_flush(flush)) == 0) { 567 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 568 return true; 569 } 570 } 571 572 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) 573 + sizeof_flush(flush), GFP_KERNEL); 574 if (!nfit_flush) 575 return false; 576 INIT_LIST_HEAD(&nfit_flush->list); 577 memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); 578 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 579 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, 580 flush->device_handle, flush->hint_count); 581 return true; 582 } 583 584 static void *add_table(struct acpi_nfit_desc *acpi_desc, 585 struct nfit_table_prev *prev, void *table, const void *end) 586 { 587 struct device *dev = acpi_desc->dev; 588 struct acpi_nfit_header *hdr; 589 void *err = ERR_PTR(-ENOMEM); 590 591 if (table >= end) 592 return NULL; 593 594 hdr = table; 595 if (!hdr->length) { 596 dev_warn(dev, "found a zero length table '%d' parsing nfit\n", 597 hdr->type); 598 return NULL; 599 } 600 601 switch (hdr->type) { 602 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: 603 if (!add_spa(acpi_desc, prev, table)) 604 return err; 605 break; 606 case ACPI_NFIT_TYPE_MEMORY_MAP: 607 if (!add_memdev(acpi_desc, prev, table)) 608 return err; 609 break; 610 case ACPI_NFIT_TYPE_CONTROL_REGION: 611 if (!add_dcr(acpi_desc, prev, table)) 612 return err; 613 break; 614 case ACPI_NFIT_TYPE_DATA_REGION: 615 if (!add_bdw(acpi_desc, prev, table)) 616 return err; 617 break; 618 case ACPI_NFIT_TYPE_INTERLEAVE: 619 if (!add_idt(acpi_desc, prev, table)) 620 return err; 621 break; 622 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 623 if (!add_flush(acpi_desc, prev, table)) 624 return err; 625 break; 626 case ACPI_NFIT_TYPE_SMBIOS: 627 dev_dbg(dev, "%s: smbios\n", __func__); 628 break; 629 default: 630 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); 631 break; 632 } 633 634 return table + hdr->length; 635 } 636 637 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, 638 struct nfit_mem *nfit_mem) 639 { 640 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 641 u16 dcr = nfit_mem->dcr->region_index; 642 struct nfit_spa *nfit_spa; 643 644 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 645 u16 range_index = nfit_spa->spa->range_index; 646 int type = nfit_spa_type(nfit_spa->spa); 647 struct nfit_memdev *nfit_memdev; 648 649 if (type != NFIT_SPA_BDW) 650 continue; 651 652 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 653 if (nfit_memdev->memdev->range_index != range_index) 654 continue; 655 if (nfit_memdev->memdev->device_handle != device_handle) 656 continue; 657 if (nfit_memdev->memdev->region_index != dcr) 658 continue; 659 660 nfit_mem->spa_bdw = nfit_spa->spa; 661 return; 662 } 663 } 664 665 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 666 nfit_mem->spa_dcr->range_index); 667 nfit_mem->bdw = NULL; 668 } 669 670 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, 671 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 672 { 673 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 674 struct nfit_memdev *nfit_memdev; 675 struct nfit_bdw *nfit_bdw; 676 struct nfit_idt *nfit_idt; 677 u16 idt_idx, range_index; 678 679 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 680 if (nfit_bdw->bdw->region_index != dcr) 681 continue; 682 nfit_mem->bdw = nfit_bdw->bdw; 683 break; 684 } 685 686 if (!nfit_mem->bdw) 687 return; 688 689 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 690 691 if (!nfit_mem->spa_bdw) 692 return; 693 694 range_index = nfit_mem->spa_bdw->range_index; 695 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 696 if (nfit_memdev->memdev->range_index != range_index || 697 nfit_memdev->memdev->region_index != dcr) 698 continue; 699 nfit_mem->memdev_bdw = nfit_memdev->memdev; 700 idt_idx = nfit_memdev->memdev->interleave_index; 701 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 702 if (nfit_idt->idt->interleave_index != idt_idx) 703 continue; 704 nfit_mem->idt_bdw = nfit_idt->idt; 705 break; 706 } 707 break; 708 } 709 } 710 711 static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, 712 struct acpi_nfit_system_address *spa) 713 { 714 struct nfit_mem *nfit_mem, *found; 715 struct nfit_memdev *nfit_memdev; 716 int type = nfit_spa_type(spa); 717 718 switch (type) { 719 case NFIT_SPA_DCR: 720 case NFIT_SPA_PM: 721 break; 722 default: 723 return 0; 724 } 725 726 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 727 struct nfit_flush *nfit_flush; 728 struct nfit_dcr *nfit_dcr; 729 u32 device_handle; 730 u16 dcr; 731 732 if (nfit_memdev->memdev->range_index != spa->range_index) 733 continue; 734 found = NULL; 735 dcr = nfit_memdev->memdev->region_index; 736 device_handle = nfit_memdev->memdev->device_handle; 737 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 738 if (__to_nfit_memdev(nfit_mem)->device_handle 739 == device_handle) { 740 found = nfit_mem; 741 break; 742 } 743 744 if (found) 745 nfit_mem = found; 746 else { 747 nfit_mem = devm_kzalloc(acpi_desc->dev, 748 sizeof(*nfit_mem), GFP_KERNEL); 749 if (!nfit_mem) 750 return -ENOMEM; 751 INIT_LIST_HEAD(&nfit_mem->list); 752 nfit_mem->acpi_desc = acpi_desc; 753 list_add(&nfit_mem->list, &acpi_desc->dimms); 754 } 755 756 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 757 if (nfit_dcr->dcr->region_index != dcr) 758 continue; 759 /* 760 * Record the control region for the dimm. For 761 * the ACPI 6.1 case, where there are separate 762 * control regions for the pmem vs blk 763 * interfaces, be sure to record the extended 764 * blk details. 765 */ 766 if (!nfit_mem->dcr) 767 nfit_mem->dcr = nfit_dcr->dcr; 768 else if (nfit_mem->dcr->windows == 0 769 && nfit_dcr->dcr->windows) 770 nfit_mem->dcr = nfit_dcr->dcr; 771 break; 772 } 773 774 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 775 struct acpi_nfit_flush_address *flush; 776 u16 i; 777 778 if (nfit_flush->flush->device_handle != device_handle) 779 continue; 780 nfit_mem->nfit_flush = nfit_flush; 781 flush = nfit_flush->flush; 782 nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, 783 flush->hint_count 784 * sizeof(struct resource), GFP_KERNEL); 785 if (!nfit_mem->flush_wpq) 786 return -ENOMEM; 787 for (i = 0; i < flush->hint_count; i++) { 788 struct resource *res = &nfit_mem->flush_wpq[i]; 789 790 res->start = flush->hint_address[i]; 791 res->end = res->start + 8 - 1; 792 } 793 break; 794 } 795 796 if (dcr && !nfit_mem->dcr) { 797 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", 798 spa->range_index, dcr); 799 return -ENODEV; 800 } 801 802 if (type == NFIT_SPA_DCR) { 803 struct nfit_idt *nfit_idt; 804 u16 idt_idx; 805 806 /* multiple dimms may share a SPA when interleaved */ 807 nfit_mem->spa_dcr = spa; 808 nfit_mem->memdev_dcr = nfit_memdev->memdev; 809 idt_idx = nfit_memdev->memdev->interleave_index; 810 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 811 if (nfit_idt->idt->interleave_index != idt_idx) 812 continue; 813 nfit_mem->idt_dcr = nfit_idt->idt; 814 break; 815 } 816 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); 817 } else { 818 /* 819 * A single dimm may belong to multiple SPA-PM 820 * ranges, record at least one in addition to 821 * any SPA-DCR range. 822 */ 823 nfit_mem->memdev_pmem = nfit_memdev->memdev; 824 } 825 } 826 827 return 0; 828 } 829 830 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 831 { 832 struct nfit_mem *a = container_of(_a, typeof(*a), list); 833 struct nfit_mem *b = container_of(_b, typeof(*b), list); 834 u32 handleA, handleB; 835 836 handleA = __to_nfit_memdev(a)->device_handle; 837 handleB = __to_nfit_memdev(b)->device_handle; 838 if (handleA < handleB) 839 return -1; 840 else if (handleA > handleB) 841 return 1; 842 return 0; 843 } 844 845 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 846 { 847 struct nfit_spa *nfit_spa; 848 849 /* 850 * For each SPA-DCR or SPA-PMEM address range find its 851 * corresponding MEMDEV(s). From each MEMDEV find the 852 * corresponding DCR. Then, if we're operating on a SPA-DCR, 853 * try to find a SPA-BDW and a corresponding BDW that references 854 * the DCR. Throw it all into an nfit_mem object. Note, that 855 * BDWs are optional. 856 */ 857 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 858 int rc; 859 860 rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); 861 if (rc) 862 return rc; 863 } 864 865 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 866 867 return 0; 868 } 869 870 static ssize_t revision_show(struct device *dev, 871 struct device_attribute *attr, char *buf) 872 { 873 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 874 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 875 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 876 877 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); 878 } 879 static DEVICE_ATTR_RO(revision); 880 881 /* 882 * This shows the number of full Address Range Scrubs that have been 883 * completed since driver load time. Userspace can wait on this using 884 * select/poll etc. A '+' at the end indicates an ARS is in progress 885 */ 886 static ssize_t scrub_show(struct device *dev, 887 struct device_attribute *attr, char *buf) 888 { 889 struct nvdimm_bus_descriptor *nd_desc; 890 ssize_t rc = -ENXIO; 891 892 device_lock(dev); 893 nd_desc = dev_get_drvdata(dev); 894 if (nd_desc) { 895 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 896 897 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, 898 (work_busy(&acpi_desc->work)) ? "+\n" : "\n"); 899 } 900 device_unlock(dev); 901 return rc; 902 } 903 904 static ssize_t scrub_store(struct device *dev, 905 struct device_attribute *attr, const char *buf, size_t size) 906 { 907 struct nvdimm_bus_descriptor *nd_desc; 908 ssize_t rc; 909 long val; 910 911 rc = kstrtol(buf, 0, &val); 912 if (rc) 913 return rc; 914 if (val != 1) 915 return -EINVAL; 916 917 device_lock(dev); 918 nd_desc = dev_get_drvdata(dev); 919 if (nd_desc) { 920 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 921 922 rc = acpi_nfit_ars_rescan(acpi_desc); 923 } 924 device_unlock(dev); 925 if (rc) 926 return rc; 927 return size; 928 } 929 static DEVICE_ATTR_RW(scrub); 930 931 static bool ars_supported(struct nvdimm_bus *nvdimm_bus) 932 { 933 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 934 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START 935 | 1 << ND_CMD_ARS_STATUS; 936 937 return (nd_desc->cmd_mask & mask) == mask; 938 } 939 940 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) 941 { 942 struct device *dev = container_of(kobj, struct device, kobj); 943 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 944 945 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) 946 return 0; 947 return a->mode; 948 } 949 950 static struct attribute *acpi_nfit_attributes[] = { 951 &dev_attr_revision.attr, 952 &dev_attr_scrub.attr, 953 NULL, 954 }; 955 956 static struct attribute_group acpi_nfit_attribute_group = { 957 .name = "nfit", 958 .attrs = acpi_nfit_attributes, 959 .is_visible = nfit_visible, 960 }; 961 962 static const struct attribute_group *acpi_nfit_attribute_groups[] = { 963 &nvdimm_bus_attribute_group, 964 &acpi_nfit_attribute_group, 965 NULL, 966 }; 967 968 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 969 { 970 struct nvdimm *nvdimm = to_nvdimm(dev); 971 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 972 973 return __to_nfit_memdev(nfit_mem); 974 } 975 976 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 977 { 978 struct nvdimm *nvdimm = to_nvdimm(dev); 979 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 980 981 return nfit_mem->dcr; 982 } 983 984 static ssize_t handle_show(struct device *dev, 985 struct device_attribute *attr, char *buf) 986 { 987 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 988 989 return sprintf(buf, "%#x\n", memdev->device_handle); 990 } 991 static DEVICE_ATTR_RO(handle); 992 993 static ssize_t phys_id_show(struct device *dev, 994 struct device_attribute *attr, char *buf) 995 { 996 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 997 998 return sprintf(buf, "%#x\n", memdev->physical_id); 999 } 1000 static DEVICE_ATTR_RO(phys_id); 1001 1002 static ssize_t vendor_show(struct device *dev, 1003 struct device_attribute *attr, char *buf) 1004 { 1005 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1006 1007 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1008 } 1009 static DEVICE_ATTR_RO(vendor); 1010 1011 static ssize_t rev_id_show(struct device *dev, 1012 struct device_attribute *attr, char *buf) 1013 { 1014 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1015 1016 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1017 } 1018 static DEVICE_ATTR_RO(rev_id); 1019 1020 static ssize_t device_show(struct device *dev, 1021 struct device_attribute *attr, char *buf) 1022 { 1023 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1024 1025 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1026 } 1027 static DEVICE_ATTR_RO(device); 1028 1029 static ssize_t subsystem_vendor_show(struct device *dev, 1030 struct device_attribute *attr, char *buf) 1031 { 1032 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1033 1034 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1035 } 1036 static DEVICE_ATTR_RO(subsystem_vendor); 1037 1038 static ssize_t subsystem_rev_id_show(struct device *dev, 1039 struct device_attribute *attr, char *buf) 1040 { 1041 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1042 1043 return sprintf(buf, "0x%04x\n", 1044 be16_to_cpu(dcr->subsystem_revision_id)); 1045 } 1046 static DEVICE_ATTR_RO(subsystem_rev_id); 1047 1048 static ssize_t subsystem_device_show(struct device *dev, 1049 struct device_attribute *attr, char *buf) 1050 { 1051 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1052 1053 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1054 } 1055 static DEVICE_ATTR_RO(subsystem_device); 1056 1057 static int num_nvdimm_formats(struct nvdimm *nvdimm) 1058 { 1059 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1060 int formats = 0; 1061 1062 if (nfit_mem->memdev_pmem) 1063 formats++; 1064 if (nfit_mem->memdev_bdw) 1065 formats++; 1066 return formats; 1067 } 1068 1069 static ssize_t format_show(struct device *dev, 1070 struct device_attribute *attr, char *buf) 1071 { 1072 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1073 1074 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1075 } 1076 static DEVICE_ATTR_RO(format); 1077 1078 static ssize_t format1_show(struct device *dev, 1079 struct device_attribute *attr, char *buf) 1080 { 1081 u32 handle; 1082 ssize_t rc = -ENXIO; 1083 struct nfit_mem *nfit_mem; 1084 struct nfit_memdev *nfit_memdev; 1085 struct acpi_nfit_desc *acpi_desc; 1086 struct nvdimm *nvdimm = to_nvdimm(dev); 1087 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1088 1089 nfit_mem = nvdimm_provider_data(nvdimm); 1090 acpi_desc = nfit_mem->acpi_desc; 1091 handle = to_nfit_memdev(dev)->device_handle; 1092 1093 /* assumes DIMMs have at most 2 published interface codes */ 1094 mutex_lock(&acpi_desc->init_mutex); 1095 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1096 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1097 struct nfit_dcr *nfit_dcr; 1098 1099 if (memdev->device_handle != handle) 1100 continue; 1101 1102 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1103 if (nfit_dcr->dcr->region_index != memdev->region_index) 1104 continue; 1105 if (nfit_dcr->dcr->code == dcr->code) 1106 continue; 1107 rc = sprintf(buf, "0x%04x\n", 1108 le16_to_cpu(nfit_dcr->dcr->code)); 1109 break; 1110 } 1111 if (rc != ENXIO) 1112 break; 1113 } 1114 mutex_unlock(&acpi_desc->init_mutex); 1115 return rc; 1116 } 1117 static DEVICE_ATTR_RO(format1); 1118 1119 static ssize_t formats_show(struct device *dev, 1120 struct device_attribute *attr, char *buf) 1121 { 1122 struct nvdimm *nvdimm = to_nvdimm(dev); 1123 1124 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1125 } 1126 static DEVICE_ATTR_RO(formats); 1127 1128 static ssize_t serial_show(struct device *dev, 1129 struct device_attribute *attr, char *buf) 1130 { 1131 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1132 1133 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1134 } 1135 static DEVICE_ATTR_RO(serial); 1136 1137 static ssize_t family_show(struct device *dev, 1138 struct device_attribute *attr, char *buf) 1139 { 1140 struct nvdimm *nvdimm = to_nvdimm(dev); 1141 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1142 1143 if (nfit_mem->family < 0) 1144 return -ENXIO; 1145 return sprintf(buf, "%d\n", nfit_mem->family); 1146 } 1147 static DEVICE_ATTR_RO(family); 1148 1149 static ssize_t dsm_mask_show(struct device *dev, 1150 struct device_attribute *attr, char *buf) 1151 { 1152 struct nvdimm *nvdimm = to_nvdimm(dev); 1153 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1154 1155 if (nfit_mem->family < 0) 1156 return -ENXIO; 1157 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); 1158 } 1159 static DEVICE_ATTR_RO(dsm_mask); 1160 1161 static ssize_t flags_show(struct device *dev, 1162 struct device_attribute *attr, char *buf) 1163 { 1164 u16 flags = to_nfit_memdev(dev)->flags; 1165 1166 return sprintf(buf, "%s%s%s%s%s\n", 1167 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 1168 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 1169 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 1170 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 1171 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : ""); 1172 } 1173 static DEVICE_ATTR_RO(flags); 1174 1175 static ssize_t id_show(struct device *dev, 1176 struct device_attribute *attr, char *buf) 1177 { 1178 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1179 1180 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) 1181 return sprintf(buf, "%04x-%02x-%04x-%08x\n", 1182 be16_to_cpu(dcr->vendor_id), 1183 dcr->manufacturing_location, 1184 be16_to_cpu(dcr->manufacturing_date), 1185 be32_to_cpu(dcr->serial_number)); 1186 else 1187 return sprintf(buf, "%04x-%08x\n", 1188 be16_to_cpu(dcr->vendor_id), 1189 be32_to_cpu(dcr->serial_number)); 1190 } 1191 static DEVICE_ATTR_RO(id); 1192 1193 static struct attribute *acpi_nfit_dimm_attributes[] = { 1194 &dev_attr_handle.attr, 1195 &dev_attr_phys_id.attr, 1196 &dev_attr_vendor.attr, 1197 &dev_attr_device.attr, 1198 &dev_attr_rev_id.attr, 1199 &dev_attr_subsystem_vendor.attr, 1200 &dev_attr_subsystem_device.attr, 1201 &dev_attr_subsystem_rev_id.attr, 1202 &dev_attr_format.attr, 1203 &dev_attr_formats.attr, 1204 &dev_attr_format1.attr, 1205 &dev_attr_serial.attr, 1206 &dev_attr_flags.attr, 1207 &dev_attr_id.attr, 1208 &dev_attr_family.attr, 1209 &dev_attr_dsm_mask.attr, 1210 NULL, 1211 }; 1212 1213 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 1214 struct attribute *a, int n) 1215 { 1216 struct device *dev = container_of(kobj, struct device, kobj); 1217 struct nvdimm *nvdimm = to_nvdimm(dev); 1218 1219 if (!to_nfit_dcr(dev)) 1220 return 0; 1221 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) 1222 return 0; 1223 return a->mode; 1224 } 1225 1226 static struct attribute_group acpi_nfit_dimm_attribute_group = { 1227 .name = "nfit", 1228 .attrs = acpi_nfit_dimm_attributes, 1229 .is_visible = acpi_nfit_dimm_attr_visible, 1230 }; 1231 1232 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 1233 &nvdimm_attribute_group, 1234 &nd_device_attribute_group, 1235 &acpi_nfit_dimm_attribute_group, 1236 NULL, 1237 }; 1238 1239 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 1240 u32 device_handle) 1241 { 1242 struct nfit_mem *nfit_mem; 1243 1244 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1245 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 1246 return nfit_mem->nvdimm; 1247 1248 return NULL; 1249 } 1250 1251 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1252 struct nfit_mem *nfit_mem, u32 device_handle) 1253 { 1254 struct acpi_device *adev, *adev_dimm; 1255 struct device *dev = acpi_desc->dev; 1256 unsigned long dsm_mask; 1257 const u8 *uuid; 1258 int i; 1259 1260 /* nfit test assumes 1:1 relationship between commands and dsms */ 1261 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; 1262 nfit_mem->family = NVDIMM_FAMILY_INTEL; 1263 adev = to_acpi_dev(acpi_desc); 1264 if (!adev) 1265 return 0; 1266 1267 adev_dimm = acpi_find_child_device(adev, device_handle, false); 1268 nfit_mem->adev = adev_dimm; 1269 if (!adev_dimm) { 1270 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 1271 device_handle); 1272 return force_enable_dimms ? 0 : -ENODEV; 1273 } 1274 1275 /* 1276 * Until standardization materializes we need to consider 4 1277 * different command sets. Note, that checking for function0 (bit0) 1278 * tells us if any commands are reachable through this uuid. 1279 */ 1280 for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) 1281 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1282 break; 1283 1284 /* limit the supported commands to those that are publicly documented */ 1285 nfit_mem->family = i; 1286 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1287 dsm_mask = 0x3fe; 1288 if (disable_vendor_specific) 1289 dsm_mask &= ~(1 << ND_CMD_VENDOR); 1290 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { 1291 dsm_mask = 0x1c3c76; 1292 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { 1293 dsm_mask = 0x1fe; 1294 if (disable_vendor_specific) 1295 dsm_mask &= ~(1 << 8); 1296 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { 1297 dsm_mask = 0xffffffff; 1298 } else { 1299 dev_dbg(dev, "unknown dimm command family\n"); 1300 nfit_mem->family = -1; 1301 /* DSMs are optional, continue loading the driver... */ 1302 return 0; 1303 } 1304 1305 uuid = to_nfit_uuid(nfit_mem->family); 1306 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1307 if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) 1308 set_bit(i, &nfit_mem->dsm_mask); 1309 1310 return 0; 1311 } 1312 1313 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 1314 { 1315 struct nfit_mem *nfit_mem; 1316 int dimm_count = 0; 1317 1318 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1319 struct acpi_nfit_flush_address *flush; 1320 unsigned long flags = 0, cmd_mask; 1321 struct nvdimm *nvdimm; 1322 u32 device_handle; 1323 u16 mem_flags; 1324 int rc; 1325 1326 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 1327 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 1328 if (nvdimm) { 1329 dimm_count++; 1330 continue; 1331 } 1332 1333 if (nfit_mem->bdw && nfit_mem->memdev_pmem) 1334 flags |= NDD_ALIASING; 1335 1336 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 1337 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 1338 flags |= NDD_UNARMED; 1339 1340 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 1341 if (rc) 1342 continue; 1343 1344 /* 1345 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL 1346 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the 1347 * userspace interface. 1348 */ 1349 cmd_mask = 1UL << ND_CMD_CALL; 1350 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 1351 cmd_mask |= nfit_mem->dsm_mask; 1352 1353 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush 1354 : NULL; 1355 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 1356 acpi_nfit_dimm_attribute_groups, 1357 flags, cmd_mask, flush ? flush->hint_count : 0, 1358 nfit_mem->flush_wpq); 1359 if (!nvdimm) 1360 return -ENOMEM; 1361 1362 nfit_mem->nvdimm = nvdimm; 1363 dimm_count++; 1364 1365 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 1366 continue; 1367 1368 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", 1369 nvdimm_name(nvdimm), 1370 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 1371 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 1372 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 1373 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : ""); 1374 1375 } 1376 1377 return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 1378 } 1379 1380 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 1381 { 1382 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1383 const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); 1384 struct acpi_device *adev; 1385 int i; 1386 1387 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 1388 adev = to_acpi_dev(acpi_desc); 1389 if (!adev) 1390 return; 1391 1392 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 1393 if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) 1394 set_bit(i, &nd_desc->cmd_mask); 1395 } 1396 1397 static ssize_t range_index_show(struct device *dev, 1398 struct device_attribute *attr, char *buf) 1399 { 1400 struct nd_region *nd_region = to_nd_region(dev); 1401 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1402 1403 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 1404 } 1405 static DEVICE_ATTR_RO(range_index); 1406 1407 static struct attribute *acpi_nfit_region_attributes[] = { 1408 &dev_attr_range_index.attr, 1409 NULL, 1410 }; 1411 1412 static struct attribute_group acpi_nfit_region_attribute_group = { 1413 .name = "nfit", 1414 .attrs = acpi_nfit_region_attributes, 1415 }; 1416 1417 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 1418 &nd_region_attribute_group, 1419 &nd_mapping_attribute_group, 1420 &nd_device_attribute_group, 1421 &nd_numa_attribute_group, 1422 &acpi_nfit_region_attribute_group, 1423 NULL, 1424 }; 1425 1426 /* enough info to uniquely specify an interleave set */ 1427 struct nfit_set_info { 1428 struct nfit_set_info_map { 1429 u64 region_offset; 1430 u32 serial_number; 1431 u32 pad; 1432 } mapping[0]; 1433 }; 1434 1435 static size_t sizeof_nfit_set_info(int num_mappings) 1436 { 1437 return sizeof(struct nfit_set_info) 1438 + num_mappings * sizeof(struct nfit_set_info_map); 1439 } 1440 1441 static int cmp_map(const void *m0, const void *m1) 1442 { 1443 const struct nfit_set_info_map *map0 = m0; 1444 const struct nfit_set_info_map *map1 = m1; 1445 1446 return memcmp(&map0->region_offset, &map1->region_offset, 1447 sizeof(u64)); 1448 } 1449 1450 /* Retrieve the nth entry referencing this spa */ 1451 static struct acpi_nfit_memory_map *memdev_from_spa( 1452 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 1453 { 1454 struct nfit_memdev *nfit_memdev; 1455 1456 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 1457 if (nfit_memdev->memdev->range_index == range_index) 1458 if (n-- == 0) 1459 return nfit_memdev->memdev; 1460 return NULL; 1461 } 1462 1463 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 1464 struct nd_region_desc *ndr_desc, 1465 struct acpi_nfit_system_address *spa) 1466 { 1467 int i, spa_type = nfit_spa_type(spa); 1468 struct device *dev = acpi_desc->dev; 1469 struct nd_interleave_set *nd_set; 1470 u16 nr = ndr_desc->num_mappings; 1471 struct nfit_set_info *info; 1472 1473 if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) 1474 /* pass */; 1475 else 1476 return 0; 1477 1478 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 1479 if (!nd_set) 1480 return -ENOMEM; 1481 1482 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 1483 if (!info) 1484 return -ENOMEM; 1485 for (i = 0; i < nr; i++) { 1486 struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; 1487 struct nfit_set_info_map *map = &info->mapping[i]; 1488 struct nvdimm *nvdimm = nd_mapping->nvdimm; 1489 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1490 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 1491 spa->range_index, i); 1492 1493 if (!memdev || !nfit_mem->dcr) { 1494 dev_err(dev, "%s: failed to find DCR\n", __func__); 1495 return -ENODEV; 1496 } 1497 1498 map->region_offset = memdev->region_offset; 1499 map->serial_number = nfit_mem->dcr->serial_number; 1500 } 1501 1502 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 1503 cmp_map, NULL); 1504 nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 1505 ndr_desc->nd_set = nd_set; 1506 devm_kfree(dev, info); 1507 1508 return 0; 1509 } 1510 1511 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 1512 { 1513 struct acpi_nfit_interleave *idt = mmio->idt; 1514 u32 sub_line_offset, line_index, line_offset; 1515 u64 line_no, table_skip_count, table_offset; 1516 1517 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 1518 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 1519 line_offset = idt->line_offset[line_index] 1520 * mmio->line_size; 1521 table_offset = table_skip_count * mmio->table_size; 1522 1523 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 1524 } 1525 1526 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 1527 { 1528 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1529 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 1530 1531 if (mmio->num_lines) 1532 offset = to_interleave_offset(offset, mmio); 1533 1534 return readl(mmio->addr.base + offset); 1535 } 1536 1537 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 1538 resource_size_t dpa, unsigned int len, unsigned int write) 1539 { 1540 u64 cmd, offset; 1541 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1542 1543 enum { 1544 BCW_OFFSET_MASK = (1ULL << 48)-1, 1545 BCW_LEN_SHIFT = 48, 1546 BCW_LEN_MASK = (1ULL << 8) - 1, 1547 BCW_CMD_SHIFT = 56, 1548 }; 1549 1550 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 1551 len = len >> L1_CACHE_SHIFT; 1552 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 1553 cmd |= ((u64) write) << BCW_CMD_SHIFT; 1554 1555 offset = nfit_blk->cmd_offset + mmio->size * bw; 1556 if (mmio->num_lines) 1557 offset = to_interleave_offset(offset, mmio); 1558 1559 writeq(cmd, mmio->addr.base + offset); 1560 nvdimm_flush(nfit_blk->nd_region); 1561 1562 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 1563 readq(mmio->addr.base + offset); 1564 } 1565 1566 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 1567 resource_size_t dpa, void *iobuf, size_t len, int rw, 1568 unsigned int lane) 1569 { 1570 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 1571 unsigned int copied = 0; 1572 u64 base_offset; 1573 int rc; 1574 1575 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 1576 + lane * mmio->size; 1577 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 1578 while (len) { 1579 unsigned int c; 1580 u64 offset; 1581 1582 if (mmio->num_lines) { 1583 u32 line_offset; 1584 1585 offset = to_interleave_offset(base_offset + copied, 1586 mmio); 1587 div_u64_rem(offset, mmio->line_size, &line_offset); 1588 c = min_t(size_t, len, mmio->line_size - line_offset); 1589 } else { 1590 offset = base_offset + nfit_blk->bdw_offset; 1591 c = len; 1592 } 1593 1594 if (rw) 1595 memcpy_to_pmem(mmio->addr.aperture + offset, 1596 iobuf + copied, c); 1597 else { 1598 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 1599 mmio_flush_range((void __force *) 1600 mmio->addr.aperture + offset, c); 1601 1602 memcpy_from_pmem(iobuf + copied, 1603 mmio->addr.aperture + offset, c); 1604 } 1605 1606 copied += c; 1607 len -= c; 1608 } 1609 1610 if (rw) 1611 nvdimm_flush(nfit_blk->nd_region); 1612 1613 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 1614 return rc; 1615 } 1616 1617 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 1618 resource_size_t dpa, void *iobuf, u64 len, int rw) 1619 { 1620 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 1621 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 1622 struct nd_region *nd_region = nfit_blk->nd_region; 1623 unsigned int lane, copied = 0; 1624 int rc = 0; 1625 1626 lane = nd_region_acquire_lane(nd_region); 1627 while (len) { 1628 u64 c = min(len, mmio->size); 1629 1630 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 1631 iobuf + copied, c, rw, lane); 1632 if (rc) 1633 break; 1634 1635 copied += c; 1636 len -= c; 1637 } 1638 nd_region_release_lane(nd_region, lane); 1639 1640 return rc; 1641 } 1642 1643 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 1644 struct acpi_nfit_interleave *idt, u16 interleave_ways) 1645 { 1646 if (idt) { 1647 mmio->num_lines = idt->line_count; 1648 mmio->line_size = idt->line_size; 1649 if (interleave_ways == 0) 1650 return -ENXIO; 1651 mmio->table_size = mmio->num_lines * interleave_ways 1652 * mmio->line_size; 1653 } 1654 1655 return 0; 1656 } 1657 1658 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 1659 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 1660 { 1661 struct nd_cmd_dimm_flags flags; 1662 int rc; 1663 1664 memset(&flags, 0, sizeof(flags)); 1665 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 1666 sizeof(flags), NULL); 1667 1668 if (rc >= 0 && flags.status == 0) 1669 nfit_blk->dimm_flags = flags.flags; 1670 else if (rc == -ENOTTY) { 1671 /* fall back to a conservative default */ 1672 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 1673 rc = 0; 1674 } else 1675 rc = -ENXIO; 1676 1677 return rc; 1678 } 1679 1680 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 1681 struct device *dev) 1682 { 1683 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1684 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1685 struct nfit_blk_mmio *mmio; 1686 struct nfit_blk *nfit_blk; 1687 struct nfit_mem *nfit_mem; 1688 struct nvdimm *nvdimm; 1689 int rc; 1690 1691 nvdimm = nd_blk_region_to_dimm(ndbr); 1692 nfit_mem = nvdimm_provider_data(nvdimm); 1693 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 1694 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 1695 nfit_mem ? "" : " nfit_mem", 1696 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 1697 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 1698 return -ENXIO; 1699 } 1700 1701 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 1702 if (!nfit_blk) 1703 return -ENOMEM; 1704 nd_blk_region_set_provider_data(ndbr, nfit_blk); 1705 nfit_blk->nd_region = to_nd_region(dev); 1706 1707 /* map block aperture memory */ 1708 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1709 mmio = &nfit_blk->mmio[BDW]; 1710 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 1711 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); 1712 if (!mmio->addr.base) { 1713 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1714 nvdimm_name(nvdimm)); 1715 return -ENOMEM; 1716 } 1717 mmio->size = nfit_mem->bdw->size; 1718 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 1719 mmio->idt = nfit_mem->idt_bdw; 1720 mmio->spa = nfit_mem->spa_bdw; 1721 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 1722 nfit_mem->memdev_bdw->interleave_ways); 1723 if (rc) { 1724 dev_dbg(dev, "%s: %s failed to init bdw interleave\n", 1725 __func__, nvdimm_name(nvdimm)); 1726 return rc; 1727 } 1728 1729 /* map block control memory */ 1730 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1731 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1732 mmio = &nfit_blk->mmio[DCR]; 1733 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 1734 nfit_mem->spa_dcr->length); 1735 if (!mmio->addr.base) { 1736 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1737 nvdimm_name(nvdimm)); 1738 return -ENOMEM; 1739 } 1740 mmio->size = nfit_mem->dcr->window_size; 1741 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 1742 mmio->idt = nfit_mem->idt_dcr; 1743 mmio->spa = nfit_mem->spa_dcr; 1744 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 1745 nfit_mem->memdev_dcr->interleave_ways); 1746 if (rc) { 1747 dev_dbg(dev, "%s: %s failed to init dcr interleave\n", 1748 __func__, nvdimm_name(nvdimm)); 1749 return rc; 1750 } 1751 1752 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 1753 if (rc < 0) { 1754 dev_dbg(dev, "%s: %s failed get DIMM flags\n", 1755 __func__, nvdimm_name(nvdimm)); 1756 return rc; 1757 } 1758 1759 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 1760 dev_warn(dev, "unable to guarantee persistence of writes\n"); 1761 1762 if (mmio->line_size == 0) 1763 return 0; 1764 1765 if ((u32) nfit_blk->cmd_offset % mmio->line_size 1766 + 8 > mmio->line_size) { 1767 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 1768 return -ENXIO; 1769 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 1770 + 8 > mmio->line_size) { 1771 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 1772 return -ENXIO; 1773 } 1774 1775 return 0; 1776 } 1777 1778 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 1779 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 1780 { 1781 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1782 struct acpi_nfit_system_address *spa = nfit_spa->spa; 1783 int cmd_rc, rc; 1784 1785 cmd->address = spa->address; 1786 cmd->length = spa->length; 1787 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 1788 sizeof(*cmd), &cmd_rc); 1789 if (rc < 0) 1790 return rc; 1791 return cmd_rc; 1792 } 1793 1794 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) 1795 { 1796 int rc; 1797 int cmd_rc; 1798 struct nd_cmd_ars_start ars_start; 1799 struct acpi_nfit_system_address *spa = nfit_spa->spa; 1800 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1801 1802 memset(&ars_start, 0, sizeof(ars_start)); 1803 ars_start.address = spa->address; 1804 ars_start.length = spa->length; 1805 if (nfit_spa_type(spa) == NFIT_SPA_PM) 1806 ars_start.type = ND_ARS_PERSISTENT; 1807 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 1808 ars_start.type = ND_ARS_VOLATILE; 1809 else 1810 return -ENOTTY; 1811 1812 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 1813 sizeof(ars_start), &cmd_rc); 1814 1815 if (rc < 0) 1816 return rc; 1817 return cmd_rc; 1818 } 1819 1820 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 1821 { 1822 int rc, cmd_rc; 1823 struct nd_cmd_ars_start ars_start; 1824 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1825 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 1826 1827 memset(&ars_start, 0, sizeof(ars_start)); 1828 ars_start.address = ars_status->restart_address; 1829 ars_start.length = ars_status->restart_length; 1830 ars_start.type = ars_status->type; 1831 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 1832 sizeof(ars_start), &cmd_rc); 1833 if (rc < 0) 1834 return rc; 1835 return cmd_rc; 1836 } 1837 1838 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 1839 { 1840 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1841 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 1842 int rc, cmd_rc; 1843 1844 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 1845 acpi_desc->ars_status_size, &cmd_rc); 1846 if (rc < 0) 1847 return rc; 1848 return cmd_rc; 1849 } 1850 1851 static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus, 1852 struct nd_cmd_ars_status *ars_status) 1853 { 1854 int rc; 1855 u32 i; 1856 1857 for (i = 0; i < ars_status->num_records; i++) { 1858 rc = nvdimm_bus_add_poison(nvdimm_bus, 1859 ars_status->records[i].err_address, 1860 ars_status->records[i].length); 1861 if (rc) 1862 return rc; 1863 } 1864 1865 return 0; 1866 } 1867 1868 static void acpi_nfit_remove_resource(void *data) 1869 { 1870 struct resource *res = data; 1871 1872 remove_resource(res); 1873 } 1874 1875 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 1876 struct nd_region_desc *ndr_desc) 1877 { 1878 struct resource *res, *nd_res = ndr_desc->res; 1879 int is_pmem, ret; 1880 1881 /* No operation if the region is already registered as PMEM */ 1882 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 1883 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 1884 if (is_pmem == REGION_INTERSECTS) 1885 return 0; 1886 1887 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 1888 if (!res) 1889 return -ENOMEM; 1890 1891 res->name = "Persistent Memory"; 1892 res->start = nd_res->start; 1893 res->end = nd_res->end; 1894 res->flags = IORESOURCE_MEM; 1895 res->desc = IORES_DESC_PERSISTENT_MEMORY; 1896 1897 ret = insert_resource(&iomem_resource, res); 1898 if (ret) 1899 return ret; 1900 1901 ret = devm_add_action_or_reset(acpi_desc->dev, 1902 acpi_nfit_remove_resource, 1903 res); 1904 if (ret) 1905 return ret; 1906 1907 return 0; 1908 } 1909 1910 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 1911 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, 1912 struct acpi_nfit_memory_map *memdev, 1913 struct nfit_spa *nfit_spa) 1914 { 1915 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 1916 memdev->device_handle); 1917 struct acpi_nfit_system_address *spa = nfit_spa->spa; 1918 struct nd_blk_region_desc *ndbr_desc; 1919 struct nfit_mem *nfit_mem; 1920 int blk_valid = 0; 1921 1922 if (!nvdimm) { 1923 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 1924 spa->range_index, memdev->device_handle); 1925 return -ENODEV; 1926 } 1927 1928 nd_mapping->nvdimm = nvdimm; 1929 switch (nfit_spa_type(spa)) { 1930 case NFIT_SPA_PM: 1931 case NFIT_SPA_VOLATILE: 1932 nd_mapping->start = memdev->address; 1933 nd_mapping->size = memdev->region_size; 1934 break; 1935 case NFIT_SPA_DCR: 1936 nfit_mem = nvdimm_provider_data(nvdimm); 1937 if (!nfit_mem || !nfit_mem->bdw) { 1938 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 1939 spa->range_index, nvdimm_name(nvdimm)); 1940 } else { 1941 nd_mapping->size = nfit_mem->bdw->capacity; 1942 nd_mapping->start = nfit_mem->bdw->start_address; 1943 ndr_desc->num_lanes = nfit_mem->bdw->windows; 1944 blk_valid = 1; 1945 } 1946 1947 ndr_desc->nd_mapping = nd_mapping; 1948 ndr_desc->num_mappings = blk_valid; 1949 ndbr_desc = to_blk_region_desc(ndr_desc); 1950 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1951 ndbr_desc->do_io = acpi_desc->blk_do_io; 1952 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 1953 ndr_desc); 1954 if (!nfit_spa->nd_region) 1955 return -ENOMEM; 1956 break; 1957 } 1958 1959 return 0; 1960 } 1961 1962 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 1963 { 1964 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 1965 nfit_spa_type(spa) == NFIT_SPA_VCD || 1966 nfit_spa_type(spa) == NFIT_SPA_PDISK || 1967 nfit_spa_type(spa) == NFIT_SPA_PCD); 1968 } 1969 1970 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 1971 struct nfit_spa *nfit_spa) 1972 { 1973 static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; 1974 struct acpi_nfit_system_address *spa = nfit_spa->spa; 1975 struct nd_blk_region_desc ndbr_desc; 1976 struct nd_region_desc *ndr_desc; 1977 struct nfit_memdev *nfit_memdev; 1978 struct nvdimm_bus *nvdimm_bus; 1979 struct resource res; 1980 int count = 0, rc; 1981 1982 if (nfit_spa->nd_region) 1983 return 0; 1984 1985 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 1986 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", 1987 __func__); 1988 return 0; 1989 } 1990 1991 memset(&res, 0, sizeof(res)); 1992 memset(&nd_mappings, 0, sizeof(nd_mappings)); 1993 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 1994 res.start = spa->address; 1995 res.end = res.start + spa->length - 1; 1996 ndr_desc = &ndbr_desc.ndr_desc; 1997 ndr_desc->res = &res; 1998 ndr_desc->provider_data = nfit_spa; 1999 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2000 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2001 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2002 spa->proximity_domain); 2003 else 2004 ndr_desc->numa_node = NUMA_NO_NODE; 2005 2006 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2007 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2008 struct nd_mapping *nd_mapping; 2009 2010 if (memdev->range_index != spa->range_index) 2011 continue; 2012 if (count >= ND_MAX_MAPPINGS) { 2013 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2014 spa->range_index, ND_MAX_MAPPINGS); 2015 return -ENXIO; 2016 } 2017 nd_mapping = &nd_mappings[count++]; 2018 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, 2019 memdev, nfit_spa); 2020 if (rc) 2021 goto out; 2022 } 2023 2024 ndr_desc->nd_mapping = nd_mappings; 2025 ndr_desc->num_mappings = count; 2026 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2027 if (rc) 2028 goto out; 2029 2030 nvdimm_bus = acpi_desc->nvdimm_bus; 2031 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2032 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2033 if (rc) { 2034 dev_warn(acpi_desc->dev, 2035 "failed to insert pmem resource to iomem: %d\n", 2036 rc); 2037 goto out; 2038 } 2039 2040 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2041 ndr_desc); 2042 if (!nfit_spa->nd_region) 2043 rc = -ENOMEM; 2044 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { 2045 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2046 ndr_desc); 2047 if (!nfit_spa->nd_region) 2048 rc = -ENOMEM; 2049 } else if (nfit_spa_is_virtual(spa)) { 2050 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2051 ndr_desc); 2052 if (!nfit_spa->nd_region) 2053 rc = -ENOMEM; 2054 } 2055 2056 out: 2057 if (rc) 2058 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2059 nfit_spa->spa->range_index); 2060 return rc; 2061 } 2062 2063 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, 2064 u32 max_ars) 2065 { 2066 struct device *dev = acpi_desc->dev; 2067 struct nd_cmd_ars_status *ars_status; 2068 2069 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { 2070 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); 2071 return 0; 2072 } 2073 2074 if (acpi_desc->ars_status) 2075 devm_kfree(dev, acpi_desc->ars_status); 2076 acpi_desc->ars_status = NULL; 2077 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL); 2078 if (!ars_status) 2079 return -ENOMEM; 2080 acpi_desc->ars_status = ars_status; 2081 acpi_desc->ars_status_size = max_ars; 2082 return 0; 2083 } 2084 2085 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, 2086 struct nfit_spa *nfit_spa) 2087 { 2088 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2089 int rc; 2090 2091 if (!nfit_spa->max_ars) { 2092 struct nd_cmd_ars_cap ars_cap; 2093 2094 memset(&ars_cap, 0, sizeof(ars_cap)); 2095 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 2096 if (rc < 0) 2097 return rc; 2098 nfit_spa->max_ars = ars_cap.max_ars_out; 2099 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 2100 /* check that the supported scrub types match the spa type */ 2101 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE && 2102 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0) 2103 return -ENOTTY; 2104 else if (nfit_spa_type(spa) == NFIT_SPA_PM && 2105 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0) 2106 return -ENOTTY; 2107 } 2108 2109 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars)) 2110 return -ENOMEM; 2111 2112 rc = ars_get_status(acpi_desc); 2113 if (rc < 0 && rc != -ENOSPC) 2114 return rc; 2115 2116 if (ars_status_process_records(acpi_desc->nvdimm_bus, 2117 acpi_desc->ars_status)) 2118 return -ENOMEM; 2119 2120 return 0; 2121 } 2122 2123 static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, 2124 struct nfit_spa *nfit_spa) 2125 { 2126 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2127 unsigned int overflow_retry = scrub_overflow_abort; 2128 u64 init_ars_start = 0, init_ars_len = 0; 2129 struct device *dev = acpi_desc->dev; 2130 unsigned int tmo = scrub_timeout; 2131 int rc; 2132 2133 if (!nfit_spa->ars_required || !nfit_spa->nd_region) 2134 return; 2135 2136 rc = ars_start(acpi_desc, nfit_spa); 2137 /* 2138 * If we timed out the initial scan we'll still be busy here, 2139 * and will wait another timeout before giving up permanently. 2140 */ 2141 if (rc < 0 && rc != -EBUSY) 2142 return; 2143 2144 do { 2145 u64 ars_start, ars_len; 2146 2147 if (acpi_desc->cancel) 2148 break; 2149 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); 2150 if (rc == -ENOTTY) 2151 break; 2152 if (rc == -EBUSY && !tmo) { 2153 dev_warn(dev, "range %d ars timeout, aborting\n", 2154 spa->range_index); 2155 break; 2156 } 2157 2158 if (rc == -EBUSY) { 2159 /* 2160 * Note, entries may be appended to the list 2161 * while the lock is dropped, but the workqueue 2162 * being active prevents entries being deleted / 2163 * freed. 2164 */ 2165 mutex_unlock(&acpi_desc->init_mutex); 2166 ssleep(1); 2167 tmo--; 2168 mutex_lock(&acpi_desc->init_mutex); 2169 continue; 2170 } 2171 2172 /* we got some results, but there are more pending... */ 2173 if (rc == -ENOSPC && overflow_retry--) { 2174 if (!init_ars_len) { 2175 init_ars_len = acpi_desc->ars_status->length; 2176 init_ars_start = acpi_desc->ars_status->address; 2177 } 2178 rc = ars_continue(acpi_desc); 2179 } 2180 2181 if (rc < 0) { 2182 dev_warn(dev, "range %d ars continuation failed\n", 2183 spa->range_index); 2184 break; 2185 } 2186 2187 if (init_ars_len) { 2188 ars_start = init_ars_start; 2189 ars_len = init_ars_len; 2190 } else { 2191 ars_start = acpi_desc->ars_status->address; 2192 ars_len = acpi_desc->ars_status->length; 2193 } 2194 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", 2195 spa->range_index, ars_start, ars_len); 2196 /* notify the region about new poison entries */ 2197 nvdimm_region_notify(nfit_spa->nd_region, 2198 NVDIMM_REVALIDATE_POISON); 2199 break; 2200 } while (1); 2201 } 2202 2203 static void acpi_nfit_scrub(struct work_struct *work) 2204 { 2205 struct device *dev; 2206 u64 init_scrub_length = 0; 2207 struct nfit_spa *nfit_spa; 2208 u64 init_scrub_address = 0; 2209 bool init_ars_done = false; 2210 struct acpi_nfit_desc *acpi_desc; 2211 unsigned int tmo = scrub_timeout; 2212 unsigned int overflow_retry = scrub_overflow_abort; 2213 2214 acpi_desc = container_of(work, typeof(*acpi_desc), work); 2215 dev = acpi_desc->dev; 2216 2217 /* 2218 * We scrub in 2 phases. The first phase waits for any platform 2219 * firmware initiated scrubs to complete and then we go search for the 2220 * affected spa regions to mark them scanned. In the second phase we 2221 * initiate a directed scrub for every range that was not scrubbed in 2222 * phase 1. If we're called for a 'rescan', we harmlessly pass through 2223 * the first phase, but really only care about running phase 2, where 2224 * regions can be notified of new poison. 2225 */ 2226 2227 /* process platform firmware initiated scrubs */ 2228 retry: 2229 mutex_lock(&acpi_desc->init_mutex); 2230 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2231 struct nd_cmd_ars_status *ars_status; 2232 struct acpi_nfit_system_address *spa; 2233 u64 ars_start, ars_len; 2234 int rc; 2235 2236 if (acpi_desc->cancel) 2237 break; 2238 2239 if (nfit_spa->nd_region) 2240 continue; 2241 2242 if (init_ars_done) { 2243 /* 2244 * No need to re-query, we're now just 2245 * reconciling all the ranges covered by the 2246 * initial scrub 2247 */ 2248 rc = 0; 2249 } else 2250 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); 2251 2252 if (rc == -ENOTTY) { 2253 /* no ars capability, just register spa and move on */ 2254 acpi_nfit_register_region(acpi_desc, nfit_spa); 2255 continue; 2256 } 2257 2258 if (rc == -EBUSY && !tmo) { 2259 /* fallthrough to directed scrub in phase 2 */ 2260 dev_warn(dev, "timeout awaiting ars results, continuing...\n"); 2261 break; 2262 } else if (rc == -EBUSY) { 2263 mutex_unlock(&acpi_desc->init_mutex); 2264 ssleep(1); 2265 tmo--; 2266 goto retry; 2267 } 2268 2269 /* we got some results, but there are more pending... */ 2270 if (rc == -ENOSPC && overflow_retry--) { 2271 ars_status = acpi_desc->ars_status; 2272 /* 2273 * Record the original scrub range, so that we 2274 * can recall all the ranges impacted by the 2275 * initial scrub. 2276 */ 2277 if (!init_scrub_length) { 2278 init_scrub_length = ars_status->length; 2279 init_scrub_address = ars_status->address; 2280 } 2281 rc = ars_continue(acpi_desc); 2282 if (rc == 0) { 2283 mutex_unlock(&acpi_desc->init_mutex); 2284 goto retry; 2285 } 2286 } 2287 2288 if (rc < 0) { 2289 /* 2290 * Initial scrub failed, we'll give it one more 2291 * try below... 2292 */ 2293 break; 2294 } 2295 2296 /* We got some final results, record completed ranges */ 2297 ars_status = acpi_desc->ars_status; 2298 if (init_scrub_length) { 2299 ars_start = init_scrub_address; 2300 ars_len = ars_start + init_scrub_length; 2301 } else { 2302 ars_start = ars_status->address; 2303 ars_len = ars_status->length; 2304 } 2305 spa = nfit_spa->spa; 2306 2307 if (!init_ars_done) { 2308 init_ars_done = true; 2309 dev_dbg(dev, "init scrub %#llx + %#llx complete\n", 2310 ars_start, ars_len); 2311 } 2312 if (ars_start <= spa->address && ars_start + ars_len 2313 >= spa->address + spa->length) 2314 acpi_nfit_register_region(acpi_desc, nfit_spa); 2315 } 2316 2317 /* 2318 * For all the ranges not covered by an initial scrub we still 2319 * want to see if there are errors, but it's ok to discover them 2320 * asynchronously. 2321 */ 2322 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2323 /* 2324 * Flag all the ranges that still need scrubbing, but 2325 * register them now to make data available. 2326 */ 2327 if (!nfit_spa->nd_region) { 2328 nfit_spa->ars_required = 1; 2329 acpi_nfit_register_region(acpi_desc, nfit_spa); 2330 } 2331 } 2332 2333 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2334 acpi_nfit_async_scrub(acpi_desc, nfit_spa); 2335 acpi_desc->scrub_count++; 2336 if (acpi_desc->scrub_count_state) 2337 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2338 mutex_unlock(&acpi_desc->init_mutex); 2339 } 2340 2341 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 2342 { 2343 struct nfit_spa *nfit_spa; 2344 int rc; 2345 2346 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2347 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { 2348 /* BLK regions don't need to wait for ars results */ 2349 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 2350 if (rc) 2351 return rc; 2352 } 2353 2354 queue_work(nfit_wq, &acpi_desc->work); 2355 return 0; 2356 } 2357 2358 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 2359 struct nfit_table_prev *prev) 2360 { 2361 struct device *dev = acpi_desc->dev; 2362 2363 if (!list_empty(&prev->spas) || 2364 !list_empty(&prev->memdevs) || 2365 !list_empty(&prev->dcrs) || 2366 !list_empty(&prev->bdws) || 2367 !list_empty(&prev->idts) || 2368 !list_empty(&prev->flushes)) { 2369 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 2370 return -ENXIO; 2371 } 2372 return 0; 2373 } 2374 2375 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 2376 { 2377 struct device *dev = acpi_desc->dev; 2378 struct kernfs_node *nfit; 2379 struct device *bus_dev; 2380 2381 if (!ars_supported(acpi_desc->nvdimm_bus)) 2382 return 0; 2383 2384 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 2385 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 2386 if (!nfit) { 2387 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 2388 return -ENODEV; 2389 } 2390 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 2391 sysfs_put(nfit); 2392 if (!acpi_desc->scrub_count_state) { 2393 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 2394 return -ENODEV; 2395 } 2396 2397 return 0; 2398 } 2399 2400 static void acpi_nfit_destruct(void *data) 2401 { 2402 struct acpi_nfit_desc *acpi_desc = data; 2403 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 2404 2405 /* 2406 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 2407 * race teardown 2408 */ 2409 mutex_lock(&acpi_desc_lock); 2410 acpi_desc->cancel = 1; 2411 /* 2412 * Bounce the nvdimm bus lock to make sure any in-flight 2413 * acpi_nfit_ars_rescan() submissions have had a chance to 2414 * either submit or see ->cancel set. 2415 */ 2416 device_lock(bus_dev); 2417 device_unlock(bus_dev); 2418 2419 flush_workqueue(nfit_wq); 2420 if (acpi_desc->scrub_count_state) 2421 sysfs_put(acpi_desc->scrub_count_state); 2422 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 2423 acpi_desc->nvdimm_bus = NULL; 2424 list_del(&acpi_desc->list); 2425 mutex_unlock(&acpi_desc_lock); 2426 } 2427 2428 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 2429 { 2430 struct device *dev = acpi_desc->dev; 2431 struct nfit_table_prev prev; 2432 const void *end; 2433 int rc; 2434 2435 if (!acpi_desc->nvdimm_bus) { 2436 acpi_nfit_init_dsms(acpi_desc); 2437 2438 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 2439 &acpi_desc->nd_desc); 2440 if (!acpi_desc->nvdimm_bus) 2441 return -ENOMEM; 2442 2443 rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, 2444 acpi_desc); 2445 if (rc) 2446 return rc; 2447 2448 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 2449 if (rc) 2450 return rc; 2451 2452 /* register this acpi_desc for mce notifications */ 2453 mutex_lock(&acpi_desc_lock); 2454 list_add_tail(&acpi_desc->list, &acpi_descs); 2455 mutex_unlock(&acpi_desc_lock); 2456 } 2457 2458 mutex_lock(&acpi_desc->init_mutex); 2459 2460 INIT_LIST_HEAD(&prev.spas); 2461 INIT_LIST_HEAD(&prev.memdevs); 2462 INIT_LIST_HEAD(&prev.dcrs); 2463 INIT_LIST_HEAD(&prev.bdws); 2464 INIT_LIST_HEAD(&prev.idts); 2465 INIT_LIST_HEAD(&prev.flushes); 2466 2467 list_cut_position(&prev.spas, &acpi_desc->spas, 2468 acpi_desc->spas.prev); 2469 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 2470 acpi_desc->memdevs.prev); 2471 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 2472 acpi_desc->dcrs.prev); 2473 list_cut_position(&prev.bdws, &acpi_desc->bdws, 2474 acpi_desc->bdws.prev); 2475 list_cut_position(&prev.idts, &acpi_desc->idts, 2476 acpi_desc->idts.prev); 2477 list_cut_position(&prev.flushes, &acpi_desc->flushes, 2478 acpi_desc->flushes.prev); 2479 2480 end = data + sz; 2481 while (!IS_ERR_OR_NULL(data)) 2482 data = add_table(acpi_desc, &prev, data, end); 2483 2484 if (IS_ERR(data)) { 2485 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, 2486 PTR_ERR(data)); 2487 rc = PTR_ERR(data); 2488 goto out_unlock; 2489 } 2490 2491 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 2492 if (rc) 2493 goto out_unlock; 2494 2495 rc = nfit_mem_init(acpi_desc); 2496 if (rc) 2497 goto out_unlock; 2498 2499 rc = acpi_nfit_register_dimms(acpi_desc); 2500 if (rc) 2501 goto out_unlock; 2502 2503 rc = acpi_nfit_register_regions(acpi_desc); 2504 2505 out_unlock: 2506 mutex_unlock(&acpi_desc->init_mutex); 2507 return rc; 2508 } 2509 EXPORT_SYMBOL_GPL(acpi_nfit_init); 2510 2511 struct acpi_nfit_flush_work { 2512 struct work_struct work; 2513 struct completion cmp; 2514 }; 2515 2516 static void flush_probe(struct work_struct *work) 2517 { 2518 struct acpi_nfit_flush_work *flush; 2519 2520 flush = container_of(work, typeof(*flush), work); 2521 complete(&flush->cmp); 2522 } 2523 2524 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 2525 { 2526 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 2527 struct device *dev = acpi_desc->dev; 2528 struct acpi_nfit_flush_work flush; 2529 2530 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 2531 device_lock(dev); 2532 device_unlock(dev); 2533 2534 /* 2535 * Scrub work could take 10s of seconds, userspace may give up so we 2536 * need to be interruptible while waiting. 2537 */ 2538 INIT_WORK_ONSTACK(&flush.work, flush_probe); 2539 COMPLETION_INITIALIZER_ONSTACK(flush.cmp); 2540 queue_work(nfit_wq, &flush.work); 2541 return wait_for_completion_interruptible(&flush.cmp); 2542 } 2543 2544 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 2545 struct nvdimm *nvdimm, unsigned int cmd) 2546 { 2547 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 2548 2549 if (nvdimm) 2550 return 0; 2551 if (cmd != ND_CMD_ARS_START) 2552 return 0; 2553 2554 /* 2555 * The kernel and userspace may race to initiate a scrub, but 2556 * the scrub thread is prepared to lose that initial race. It 2557 * just needs guarantees that any ars it initiates are not 2558 * interrupted by any intervening start reqeusts from userspace. 2559 */ 2560 if (work_busy(&acpi_desc->work)) 2561 return -EBUSY; 2562 2563 return 0; 2564 } 2565 2566 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) 2567 { 2568 struct device *dev = acpi_desc->dev; 2569 struct nfit_spa *nfit_spa; 2570 2571 if (work_busy(&acpi_desc->work)) 2572 return -EBUSY; 2573 2574 if (acpi_desc->cancel) 2575 return 0; 2576 2577 mutex_lock(&acpi_desc->init_mutex); 2578 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2579 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2580 2581 if (nfit_spa_type(spa) != NFIT_SPA_PM) 2582 continue; 2583 2584 nfit_spa->ars_required = 1; 2585 } 2586 queue_work(nfit_wq, &acpi_desc->work); 2587 dev_dbg(dev, "%s: ars_scan triggered\n", __func__); 2588 mutex_unlock(&acpi_desc->init_mutex); 2589 2590 return 0; 2591 } 2592 2593 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 2594 { 2595 struct nvdimm_bus_descriptor *nd_desc; 2596 2597 dev_set_drvdata(dev, acpi_desc); 2598 acpi_desc->dev = dev; 2599 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 2600 nd_desc = &acpi_desc->nd_desc; 2601 nd_desc->provider_name = "ACPI.NFIT"; 2602 nd_desc->module = THIS_MODULE; 2603 nd_desc->ndctl = acpi_nfit_ctl; 2604 nd_desc->flush_probe = acpi_nfit_flush_probe; 2605 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 2606 nd_desc->attr_groups = acpi_nfit_attribute_groups; 2607 2608 INIT_LIST_HEAD(&acpi_desc->spas); 2609 INIT_LIST_HEAD(&acpi_desc->dcrs); 2610 INIT_LIST_HEAD(&acpi_desc->bdws); 2611 INIT_LIST_HEAD(&acpi_desc->idts); 2612 INIT_LIST_HEAD(&acpi_desc->flushes); 2613 INIT_LIST_HEAD(&acpi_desc->memdevs); 2614 INIT_LIST_HEAD(&acpi_desc->dimms); 2615 INIT_LIST_HEAD(&acpi_desc->list); 2616 mutex_init(&acpi_desc->init_mutex); 2617 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); 2618 } 2619 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 2620 2621 static int acpi_nfit_add(struct acpi_device *adev) 2622 { 2623 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 2624 struct acpi_nfit_desc *acpi_desc; 2625 struct device *dev = &adev->dev; 2626 struct acpi_table_header *tbl; 2627 acpi_status status = AE_OK; 2628 acpi_size sz; 2629 int rc = 0; 2630 2631 status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); 2632 if (ACPI_FAILURE(status)) { 2633 /* This is ok, we could have an nvdimm hotplugged later */ 2634 dev_dbg(dev, "failed to find NFIT at startup\n"); 2635 return 0; 2636 } 2637 2638 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 2639 if (!acpi_desc) 2640 return -ENOMEM; 2641 acpi_nfit_desc_init(acpi_desc, &adev->dev); 2642 2643 /* Save the acpi header for exporting the revision via sysfs */ 2644 acpi_desc->acpi_header = *tbl; 2645 2646 /* Evaluate _FIT and override with that if present */ 2647 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 2648 if (ACPI_SUCCESS(status) && buf.length > 0) { 2649 union acpi_object *obj = buf.pointer; 2650 2651 if (obj->type == ACPI_TYPE_BUFFER) 2652 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 2653 obj->buffer.length); 2654 else 2655 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", 2656 __func__, (int) obj->type); 2657 kfree(buf.pointer); 2658 } else 2659 /* skip over the lead-in header table */ 2660 rc = acpi_nfit_init(acpi_desc, (void *) tbl 2661 + sizeof(struct acpi_table_nfit), 2662 sz - sizeof(struct acpi_table_nfit)); 2663 return rc; 2664 } 2665 2666 static int acpi_nfit_remove(struct acpi_device *adev) 2667 { 2668 /* see acpi_nfit_destruct */ 2669 return 0; 2670 } 2671 2672 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 2673 { 2674 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 2675 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 2676 struct device *dev = &adev->dev; 2677 union acpi_object *obj; 2678 acpi_status status; 2679 int ret; 2680 2681 dev_dbg(dev, "%s: event: %d\n", __func__, event); 2682 2683 device_lock(dev); 2684 if (!dev->driver) { 2685 /* dev->driver may be null if we're being removed */ 2686 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 2687 goto out_unlock; 2688 } 2689 2690 if (!acpi_desc) { 2691 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 2692 if (!acpi_desc) 2693 goto out_unlock; 2694 acpi_nfit_desc_init(acpi_desc, &adev->dev); 2695 } else { 2696 /* 2697 * Finish previous registration before considering new 2698 * regions. 2699 */ 2700 flush_workqueue(nfit_wq); 2701 } 2702 2703 /* Evaluate _FIT */ 2704 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 2705 if (ACPI_FAILURE(status)) { 2706 dev_err(dev, "failed to evaluate _FIT\n"); 2707 goto out_unlock; 2708 } 2709 2710 obj = buf.pointer; 2711 if (obj->type == ACPI_TYPE_BUFFER) { 2712 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 2713 obj->buffer.length); 2714 if (ret) 2715 dev_err(dev, "failed to merge updated NFIT\n"); 2716 } else 2717 dev_err(dev, "Invalid _FIT\n"); 2718 kfree(buf.pointer); 2719 2720 out_unlock: 2721 device_unlock(dev); 2722 } 2723 2724 static const struct acpi_device_id acpi_nfit_ids[] = { 2725 { "ACPI0012", 0 }, 2726 { "", 0 }, 2727 }; 2728 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 2729 2730 static struct acpi_driver acpi_nfit_driver = { 2731 .name = KBUILD_MODNAME, 2732 .ids = acpi_nfit_ids, 2733 .ops = { 2734 .add = acpi_nfit_add, 2735 .remove = acpi_nfit_remove, 2736 .notify = acpi_nfit_notify, 2737 }, 2738 }; 2739 2740 static __init int nfit_init(void) 2741 { 2742 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 2743 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 2744 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 2745 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 2746 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 2747 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 2748 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 2749 2750 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); 2751 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); 2752 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); 2753 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); 2754 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); 2755 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); 2756 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); 2757 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); 2758 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); 2759 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 2760 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 2761 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 2762 acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 2763 2764 nfit_wq = create_singlethread_workqueue("nfit"); 2765 if (!nfit_wq) 2766 return -ENOMEM; 2767 2768 nfit_mce_register(); 2769 2770 return acpi_bus_register_driver(&acpi_nfit_driver); 2771 } 2772 2773 static __exit void nfit_exit(void) 2774 { 2775 nfit_mce_unregister(); 2776 acpi_bus_unregister_driver(&acpi_nfit_driver); 2777 destroy_workqueue(nfit_wq); 2778 WARN_ON(!list_empty(&acpi_descs)); 2779 } 2780 2781 module_init(nfit_init); 2782 module_exit(nfit_exit); 2783 MODULE_LICENSE("GPL v2"); 2784 MODULE_AUTHOR("Intel Corporation"); 2785