/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status)
{
	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(buf, cmd, status);
}

int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
			__func__, dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
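
/*
 * Descriptive note (not in the original source): find the SPA-BDW
 * (block-data-window) range that backs this DIMM's control region. A memdev
 * entry must match on range_index, device_handle and the DCR's region_index
 * before the range is recorded in nfit_mem->spa_bdw.
 */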
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;


	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		/* rc is initialized to -ENXIO; stop once a second code is found */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets. Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
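
/*
 * Descriptive note (not in the original source): the interleave-set cookie
 * computed below is a Fletcher-64 checksum over the per-mapping
 * (region_offset, serial_number) tuples sorted by region_offset; the
 * "altcookie" repeats the calculation with the legacy memcmp()-based sort so
 * that namespaces created with the old ordering still validate.
 */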
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* support namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
nvdimm_provider_data(nvdimm); 1942 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 1943 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 1944 nfit_mem ? "" : " nfit_mem", 1945 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 1946 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 1947 return -ENXIO; 1948 } 1949 1950 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 1951 if (!nfit_blk) 1952 return -ENOMEM; 1953 nd_blk_region_set_provider_data(ndbr, nfit_blk); 1954 nfit_blk->nd_region = to_nd_region(dev); 1955 1956 /* map block aperture memory */ 1957 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1958 mmio = &nfit_blk->mmio[BDW]; 1959 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 1960 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); 1961 if (!mmio->addr.base) { 1962 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1963 nvdimm_name(nvdimm)); 1964 return -ENOMEM; 1965 } 1966 mmio->size = nfit_mem->bdw->size; 1967 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 1968 mmio->idt = nfit_mem->idt_bdw; 1969 mmio->spa = nfit_mem->spa_bdw; 1970 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 1971 nfit_mem->memdev_bdw->interleave_ways); 1972 if (rc) { 1973 dev_dbg(dev, "%s: %s failed to init bdw interleave\n", 1974 __func__, nvdimm_name(nvdimm)); 1975 return rc; 1976 } 1977 1978 /* map block control memory */ 1979 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1980 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1981 mmio = &nfit_blk->mmio[DCR]; 1982 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 1983 nfit_mem->spa_dcr->length); 1984 if (!mmio->addr.base) { 1985 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1986 nvdimm_name(nvdimm)); 1987 return -ENOMEM; 1988 } 1989 mmio->size = nfit_mem->dcr->window_size; 1990 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 1991 mmio->idt = nfit_mem->idt_dcr; 1992 mmio->spa = nfit_mem->spa_dcr; 1993 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 1994 nfit_mem->memdev_dcr->interleave_ways); 1995 if (rc) { 1996 dev_dbg(dev, "%s: %s failed to init dcr interleave\n", 1997 __func__, nvdimm_name(nvdimm)); 1998 return rc; 1999 } 2000 2001 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2002 if (rc < 0) { 2003 dev_dbg(dev, "%s: %s failed get DIMM flags\n", 2004 __func__, nvdimm_name(nvdimm)); 2005 return rc; 2006 } 2007 2008 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2009 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2010 2011 if (mmio->line_size == 0) 2012 return 0; 2013 2014 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2015 + 8 > mmio->line_size) { 2016 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2017 return -ENXIO; 2018 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2019 + 8 > mmio->line_size) { 2020 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2021 return -ENXIO; 2022 } 2023 2024 return 0; 2025 } 2026 2027 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2028 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2029 { 2030 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2031 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2032 int cmd_rc, rc; 2033 2034 cmd->address = spa->address; 2035 cmd->length = spa->length; 2036 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2037 sizeof(*cmd), &cmd_rc); 2038 if (rc < 0) 2039 return rc; 2040 return cmd_rc; 2041 } 2042 2043 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) 2044 { 2045 int rc; 
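/*
 * cmd_rc below receives the ARS status as translated by acpi_nfit_ctl(),
 * e.g. -EBUSY while a previous scrub is still in flight.
 */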
2046 int cmd_rc; 2047 struct nd_cmd_ars_start ars_start; 2048 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2049 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2050 2051 memset(&ars_start, 0, sizeof(ars_start)); 2052 ars_start.address = spa->address; 2053 ars_start.length = spa->length; 2054 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2055 ars_start.type = ND_ARS_PERSISTENT; 2056 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2057 ars_start.type = ND_ARS_VOLATILE; 2058 else 2059 return -ENOTTY; 2060 2061 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2062 sizeof(ars_start), &cmd_rc); 2063 2064 if (rc < 0) 2065 return rc; 2066 return cmd_rc; 2067 } 2068 2069 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2070 { 2071 int rc, cmd_rc; 2072 struct nd_cmd_ars_start ars_start; 2073 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2074 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2075 2076 memset(&ars_start, 0, sizeof(ars_start)); 2077 ars_start.address = ars_status->restart_address; 2078 ars_start.length = ars_status->restart_length; 2079 ars_start.type = ars_status->type; 2080 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2081 sizeof(ars_start), &cmd_rc); 2082 if (rc < 0) 2083 return rc; 2084 return cmd_rc; 2085 } 2086 2087 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2088 { 2089 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2090 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2091 int rc, cmd_rc; 2092 2093 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2094 acpi_desc->ars_status_size, &cmd_rc); 2095 if (rc < 0) 2096 return rc; 2097 return cmd_rc; 2098 } 2099 2100 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc, 2101 struct nd_cmd_ars_status *ars_status) 2102 { 2103 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2104 int rc; 2105 u32 i; 2106 2107 /* 2108 * First record starts at 44 byte offset from the start of the 2109 * payload. 
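 * Records that do not fit completely within out_length are skipped
 * by the loop below and reported as truncated results.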
2110 */ 2111 if (ars_status->out_length < 44) 2112 return 0; 2113 for (i = 0; i < ars_status->num_records; i++) { 2114 /* only process full records */ 2115 if (ars_status->out_length 2116 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2117 break; 2118 rc = nvdimm_bus_add_poison(nvdimm_bus, 2119 ars_status->records[i].err_address, 2120 ars_status->records[i].length); 2121 if (rc) 2122 return rc; 2123 } 2124 if (i < ars_status->num_records) 2125 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2126 2127 return 0; 2128 } 2129 2130 static void acpi_nfit_remove_resource(void *data) 2131 { 2132 struct resource *res = data; 2133 2134 remove_resource(res); 2135 } 2136 2137 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2138 struct nd_region_desc *ndr_desc) 2139 { 2140 struct resource *res, *nd_res = ndr_desc->res; 2141 int is_pmem, ret; 2142 2143 /* No operation if the region is already registered as PMEM */ 2144 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2145 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2146 if (is_pmem == REGION_INTERSECTS) 2147 return 0; 2148 2149 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2150 if (!res) 2151 return -ENOMEM; 2152 2153 res->name = "Persistent Memory"; 2154 res->start = nd_res->start; 2155 res->end = nd_res->end; 2156 res->flags = IORESOURCE_MEM; 2157 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2158 2159 ret = insert_resource(&iomem_resource, res); 2160 if (ret) 2161 return ret; 2162 2163 ret = devm_add_action_or_reset(acpi_desc->dev, 2164 acpi_nfit_remove_resource, 2165 res); 2166 if (ret) 2167 return ret; 2168 2169 return 0; 2170 } 2171 2172 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2173 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2174 struct acpi_nfit_memory_map *memdev, 2175 struct nfit_spa *nfit_spa) 2176 { 2177 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2178 memdev->device_handle); 2179 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2180 struct nd_blk_region_desc *ndbr_desc; 2181 struct nfit_mem *nfit_mem; 2182 int blk_valid = 0; 2183 2184 if (!nvdimm) { 2185 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2186 spa->range_index, memdev->device_handle); 2187 return -ENODEV; 2188 } 2189 2190 mapping->nvdimm = nvdimm; 2191 switch (nfit_spa_type(spa)) { 2192 case NFIT_SPA_PM: 2193 case NFIT_SPA_VOLATILE: 2194 mapping->start = memdev->address; 2195 mapping->size = memdev->region_size; 2196 break; 2197 case NFIT_SPA_DCR: 2198 nfit_mem = nvdimm_provider_data(nvdimm); 2199 if (!nfit_mem || !nfit_mem->bdw) { 2200 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2201 spa->range_index, nvdimm_name(nvdimm)); 2202 } else { 2203 mapping->size = nfit_mem->bdw->capacity; 2204 mapping->start = nfit_mem->bdw->start_address; 2205 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2206 blk_valid = 1; 2207 } 2208 2209 ndr_desc->mapping = mapping; 2210 ndr_desc->num_mappings = blk_valid; 2211 ndbr_desc = to_blk_region_desc(ndr_desc); 2212 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2213 ndbr_desc->do_io = acpi_desc->blk_do_io; 2214 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2215 ndr_desc); 2216 if (!nfit_spa->nd_region) 2217 return -ENOMEM; 2218 break; 2219 } 2220 2221 return 0; 2222 } 2223 2224 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2225 { 2226 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2227 nfit_spa_type(spa) == NFIT_SPA_VCD || 2228 nfit_spa_type(spa) == 
NFIT_SPA_PDISK || 2229 nfit_spa_type(spa) == NFIT_SPA_PCD); 2230 } 2231 2232 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2233 struct nfit_spa *nfit_spa) 2234 { 2235 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2236 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2237 struct nd_blk_region_desc ndbr_desc; 2238 struct nd_region_desc *ndr_desc; 2239 struct nfit_memdev *nfit_memdev; 2240 struct nvdimm_bus *nvdimm_bus; 2241 struct resource res; 2242 int count = 0, rc; 2243 2244 if (nfit_spa->nd_region) 2245 return 0; 2246 2247 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2248 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", 2249 __func__); 2250 return 0; 2251 } 2252 2253 memset(&res, 0, sizeof(res)); 2254 memset(&mappings, 0, sizeof(mappings)); 2255 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2256 res.start = spa->address; 2257 res.end = res.start + spa->length - 1; 2258 ndr_desc = &ndbr_desc.ndr_desc; 2259 ndr_desc->res = &res; 2260 ndr_desc->provider_data = nfit_spa; 2261 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2262 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2263 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2264 spa->proximity_domain); 2265 else 2266 ndr_desc->numa_node = NUMA_NO_NODE; 2267 2268 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2269 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2270 struct nd_mapping_desc *mapping; 2271 2272 if (memdev->range_index != spa->range_index) 2273 continue; 2274 if (count >= ND_MAX_MAPPINGS) { 2275 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2276 spa->range_index, ND_MAX_MAPPINGS); 2277 return -ENXIO; 2278 } 2279 mapping = &mappings[count++]; 2280 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2281 memdev, nfit_spa); 2282 if (rc) 2283 goto out; 2284 } 2285 2286 ndr_desc->mapping = mappings; 2287 ndr_desc->num_mappings = count; 2288 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2289 if (rc) 2290 goto out; 2291 2292 nvdimm_bus = acpi_desc->nvdimm_bus; 2293 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2294 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2295 if (rc) { 2296 dev_warn(acpi_desc->dev, 2297 "failed to insert pmem resource to iomem: %d\n", 2298 rc); 2299 goto out; 2300 } 2301 2302 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2303 ndr_desc); 2304 if (!nfit_spa->nd_region) 2305 rc = -ENOMEM; 2306 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { 2307 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2308 ndr_desc); 2309 if (!nfit_spa->nd_region) 2310 rc = -ENOMEM; 2311 } else if (nfit_spa_is_virtual(spa)) { 2312 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2313 ndr_desc); 2314 if (!nfit_spa->nd_region) 2315 rc = -ENOMEM; 2316 } 2317 2318 out: 2319 if (rc) 2320 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2321 nfit_spa->spa->range_index); 2322 return rc; 2323 } 2324 2325 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, 2326 u32 max_ars) 2327 { 2328 struct device *dev = acpi_desc->dev; 2329 struct nd_cmd_ars_status *ars_status; 2330 2331 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { 2332 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); 2333 return 0; 2334 } 2335 2336 if (acpi_desc->ars_status) 2337 devm_kfree(dev, acpi_desc->ars_status); 2338 acpi_desc->ars_status = NULL; 2339 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL); 2340 if (!ars_status) 2341 return -ENOMEM; 2342 
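/*
 * Cache the buffer for reuse by later status queries; it is only
 * reallocated when max_ars grows.
 */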
acpi_desc->ars_status = ars_status; 2343 acpi_desc->ars_status_size = max_ars; 2344 return 0; 2345 } 2346 2347 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, 2348 struct nfit_spa *nfit_spa) 2349 { 2350 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2351 int rc; 2352 2353 if (!nfit_spa->max_ars) { 2354 struct nd_cmd_ars_cap ars_cap; 2355 2356 memset(&ars_cap, 0, sizeof(ars_cap)); 2357 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 2358 if (rc < 0) 2359 return rc; 2360 nfit_spa->max_ars = ars_cap.max_ars_out; 2361 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 2362 /* check that the supported scrub types match the spa type */ 2363 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE && 2364 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0) 2365 return -ENOTTY; 2366 else if (nfit_spa_type(spa) == NFIT_SPA_PM && 2367 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0) 2368 return -ENOTTY; 2369 } 2370 2371 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars)) 2372 return -ENOMEM; 2373 2374 rc = ars_get_status(acpi_desc); 2375 if (rc < 0 && rc != -ENOSPC) 2376 return rc; 2377 2378 if (ars_status_process_records(acpi_desc, acpi_desc->ars_status)) 2379 return -ENOMEM; 2380 2381 return 0; 2382 } 2383 2384 static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, 2385 struct nfit_spa *nfit_spa) 2386 { 2387 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2388 unsigned int overflow_retry = scrub_overflow_abort; 2389 u64 init_ars_start = 0, init_ars_len = 0; 2390 struct device *dev = acpi_desc->dev; 2391 unsigned int tmo = scrub_timeout; 2392 int rc; 2393 2394 if (!nfit_spa->ars_required || !nfit_spa->nd_region) 2395 return; 2396 2397 rc = ars_start(acpi_desc, nfit_spa); 2398 /* 2399 * If we timed out the initial scan we'll still be busy here, 2400 * and will wait another timeout before giving up permanently. 2401 */ 2402 if (rc < 0 && rc != -EBUSY) 2403 return; 2404 2405 do { 2406 u64 ars_start, ars_len; 2407 2408 if (acpi_desc->cancel) 2409 break; 2410 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); 2411 if (rc == -ENOTTY) 2412 break; 2413 if (rc == -EBUSY && !tmo) { 2414 dev_warn(dev, "range %d ars timeout, aborting\n", 2415 spa->range_index); 2416 break; 2417 } 2418 2419 if (rc == -EBUSY) { 2420 /* 2421 * Note, entries may be appended to the list 2422 * while the lock is dropped, but the workqueue 2423 * being active prevents entries being deleted / 2424 * freed. 2425 */ 2426 mutex_unlock(&acpi_desc->init_mutex); 2427 ssleep(1); 2428 tmo--; 2429 mutex_lock(&acpi_desc->init_mutex); 2430 continue; 2431 } 2432 2433 /* we got some results, but there are more pending... 
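 * ars_continue() below resumes the scrub from the restart address
 * reported by the previous status query.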
*/ 2434 if (rc == -ENOSPC && overflow_retry--) { 2435 if (!init_ars_len) { 2436 init_ars_len = acpi_desc->ars_status->length; 2437 init_ars_start = acpi_desc->ars_status->address; 2438 } 2439 rc = ars_continue(acpi_desc); 2440 } 2441 2442 if (rc < 0) { 2443 dev_warn(dev, "range %d ars continuation failed\n", 2444 spa->range_index); 2445 break; 2446 } 2447 2448 if (init_ars_len) { 2449 ars_start = init_ars_start; 2450 ars_len = init_ars_len; 2451 } else { 2452 ars_start = acpi_desc->ars_status->address; 2453 ars_len = acpi_desc->ars_status->length; 2454 } 2455 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", 2456 spa->range_index, ars_start, ars_len); 2457 /* notify the region about new poison entries */ 2458 nvdimm_region_notify(nfit_spa->nd_region, 2459 NVDIMM_REVALIDATE_POISON); 2460 break; 2461 } while (1); 2462 } 2463 2464 static void acpi_nfit_scrub(struct work_struct *work) 2465 { 2466 struct device *dev; 2467 u64 init_scrub_length = 0; 2468 struct nfit_spa *nfit_spa; 2469 u64 init_scrub_address = 0; 2470 bool init_ars_done = false; 2471 struct acpi_nfit_desc *acpi_desc; 2472 unsigned int tmo = scrub_timeout; 2473 unsigned int overflow_retry = scrub_overflow_abort; 2474 2475 acpi_desc = container_of(work, typeof(*acpi_desc), work); 2476 dev = acpi_desc->dev; 2477 2478 /* 2479 * We scrub in 2 phases. The first phase waits for any platform 2480 * firmware initiated scrubs to complete and then we go search for the 2481 * affected spa regions to mark them scanned. In the second phase we 2482 * initiate a directed scrub for every range that was not scrubbed in 2483 * phase 1. If we're called for a 'rescan', we harmlessly pass through 2484 * the first phase, but really only care about running phase 2, where 2485 * regions can be notified of new poison. 2486 */ 2487 2488 /* process platform firmware initiated scrubs */ 2489 retry: 2490 mutex_lock(&acpi_desc->init_mutex); 2491 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2492 struct nd_cmd_ars_status *ars_status; 2493 struct acpi_nfit_system_address *spa; 2494 u64 ars_start, ars_len; 2495 int rc; 2496 2497 if (acpi_desc->cancel) 2498 break; 2499 2500 if (nfit_spa->nd_region) 2501 continue; 2502 2503 if (init_ars_done) { 2504 /* 2505 * No need to re-query, we're now just 2506 * reconciling all the ranges covered by the 2507 * initial scrub 2508 */ 2509 rc = 0; 2510 } else 2511 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); 2512 2513 if (rc == -ENOTTY) { 2514 /* no ars capability, just register spa and move on */ 2515 acpi_nfit_register_region(acpi_desc, nfit_spa); 2516 continue; 2517 } 2518 2519 if (rc == -EBUSY && !tmo) { 2520 /* fallthrough to directed scrub in phase 2 */ 2521 dev_warn(dev, "timeout awaiting ars results, continuing...\n"); 2522 break; 2523 } else if (rc == -EBUSY) { 2524 mutex_unlock(&acpi_desc->init_mutex); 2525 ssleep(1); 2526 tmo--; 2527 goto retry; 2528 } 2529 2530 /* we got some results, but there are more pending... */ 2531 if (rc == -ENOSPC && overflow_retry--) { 2532 ars_status = acpi_desc->ars_status; 2533 /* 2534 * Record the original scrub range, so that we 2535 * can recall all the ranges impacted by the 2536 * initial scrub. 
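 * Later status queries only cover the restarted portion once
 * ars_continue() has run, so the address/length from the first
 * query is what lets us reconcile every range it covered.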
2537 */ 2538 if (!init_scrub_length) { 2539 init_scrub_length = ars_status->length; 2540 init_scrub_address = ars_status->address; 2541 } 2542 rc = ars_continue(acpi_desc); 2543 if (rc == 0) { 2544 mutex_unlock(&acpi_desc->init_mutex); 2545 goto retry; 2546 } 2547 } 2548 2549 if (rc < 0) { 2550 /* 2551 * Initial scrub failed, we'll give it one more 2552 * try below... 2553 */ 2554 break; 2555 } 2556 2557 /* We got some final results, record completed ranges */ 2558 ars_status = acpi_desc->ars_status; 2559 if (init_scrub_length) { 2560 ars_start = init_scrub_address; 2561 ars_len = ars_start + init_scrub_length; 2562 } else { 2563 ars_start = ars_status->address; 2564 ars_len = ars_status->length; 2565 } 2566 spa = nfit_spa->spa; 2567 2568 if (!init_ars_done) { 2569 init_ars_done = true; 2570 dev_dbg(dev, "init scrub %#llx + %#llx complete\n", 2571 ars_start, ars_len); 2572 } 2573 if (ars_start <= spa->address && ars_start + ars_len 2574 >= spa->address + spa->length) 2575 acpi_nfit_register_region(acpi_desc, nfit_spa); 2576 } 2577 2578 /* 2579 * For all the ranges not covered by an initial scrub we still 2580 * want to see if there are errors, but it's ok to discover them 2581 * asynchronously. 2582 */ 2583 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2584 /* 2585 * Flag all the ranges that still need scrubbing, but 2586 * register them now to make data available. 2587 */ 2588 if (!nfit_spa->nd_region) { 2589 nfit_spa->ars_required = 1; 2590 acpi_nfit_register_region(acpi_desc, nfit_spa); 2591 } 2592 } 2593 acpi_desc->init_complete = 1; 2594 2595 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2596 acpi_nfit_async_scrub(acpi_desc, nfit_spa); 2597 acpi_desc->scrub_count++; 2598 if (acpi_desc->scrub_count_state) 2599 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2600 mutex_unlock(&acpi_desc->init_mutex); 2601 } 2602 2603 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 2604 { 2605 struct nfit_spa *nfit_spa; 2606 int rc; 2607 2608 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2609 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { 2610 /* BLK regions don't need to wait for ars results */ 2611 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 2612 if (rc) 2613 return rc; 2614 } 2615 2616 if (!acpi_desc->cancel) 2617 queue_work(nfit_wq, &acpi_desc->work); 2618 return 0; 2619 } 2620 2621 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 2622 struct nfit_table_prev *prev) 2623 { 2624 struct device *dev = acpi_desc->dev; 2625 2626 if (!list_empty(&prev->spas) || 2627 !list_empty(&prev->memdevs) || 2628 !list_empty(&prev->dcrs) || 2629 !list_empty(&prev->bdws) || 2630 !list_empty(&prev->idts) || 2631 !list_empty(&prev->flushes)) { 2632 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 2633 return -ENXIO; 2634 } 2635 return 0; 2636 } 2637 2638 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 2639 { 2640 struct device *dev = acpi_desc->dev; 2641 struct kernfs_node *nfit; 2642 struct device *bus_dev; 2643 2644 if (!ars_supported(acpi_desc->nvdimm_bus)) 2645 return 0; 2646 2647 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 2648 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 2649 if (!nfit) { 2650 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 2651 return -ENODEV; 2652 } 2653 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 2654 sysfs_put(nfit); 2655 if (!acpi_desc->scrub_count_state) { 2656 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 2657 return -ENODEV; 
2658 } 2659 2660 return 0; 2661 } 2662 2663 static void acpi_nfit_unregister(void *data) 2664 { 2665 struct acpi_nfit_desc *acpi_desc = data; 2666 2667 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 2668 } 2669 2670 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 2671 { 2672 struct device *dev = acpi_desc->dev; 2673 struct nfit_table_prev prev; 2674 const void *end; 2675 int rc; 2676 2677 if (!acpi_desc->nvdimm_bus) { 2678 acpi_nfit_init_dsms(acpi_desc); 2679 2680 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 2681 &acpi_desc->nd_desc); 2682 if (!acpi_desc->nvdimm_bus) 2683 return -ENOMEM; 2684 2685 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 2686 acpi_desc); 2687 if (rc) 2688 return rc; 2689 2690 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 2691 if (rc) 2692 return rc; 2693 2694 /* register this acpi_desc for mce notifications */ 2695 mutex_lock(&acpi_desc_lock); 2696 list_add_tail(&acpi_desc->list, &acpi_descs); 2697 mutex_unlock(&acpi_desc_lock); 2698 } 2699 2700 mutex_lock(&acpi_desc->init_mutex); 2701 2702 INIT_LIST_HEAD(&prev.spas); 2703 INIT_LIST_HEAD(&prev.memdevs); 2704 INIT_LIST_HEAD(&prev.dcrs); 2705 INIT_LIST_HEAD(&prev.bdws); 2706 INIT_LIST_HEAD(&prev.idts); 2707 INIT_LIST_HEAD(&prev.flushes); 2708 2709 list_cut_position(&prev.spas, &acpi_desc->spas, 2710 acpi_desc->spas.prev); 2711 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 2712 acpi_desc->memdevs.prev); 2713 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 2714 acpi_desc->dcrs.prev); 2715 list_cut_position(&prev.bdws, &acpi_desc->bdws, 2716 acpi_desc->bdws.prev); 2717 list_cut_position(&prev.idts, &acpi_desc->idts, 2718 acpi_desc->idts.prev); 2719 list_cut_position(&prev.flushes, &acpi_desc->flushes, 2720 acpi_desc->flushes.prev); 2721 2722 end = data + sz; 2723 while (!IS_ERR_OR_NULL(data)) 2724 data = add_table(acpi_desc, &prev, data, end); 2725 2726 if (IS_ERR(data)) { 2727 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, 2728 PTR_ERR(data)); 2729 rc = PTR_ERR(data); 2730 goto out_unlock; 2731 } 2732 2733 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 2734 if (rc) 2735 goto out_unlock; 2736 2737 rc = nfit_mem_init(acpi_desc); 2738 if (rc) 2739 goto out_unlock; 2740 2741 rc = acpi_nfit_register_dimms(acpi_desc); 2742 if (rc) 2743 goto out_unlock; 2744 2745 rc = acpi_nfit_register_regions(acpi_desc); 2746 2747 out_unlock: 2748 mutex_unlock(&acpi_desc->init_mutex); 2749 return rc; 2750 } 2751 EXPORT_SYMBOL_GPL(acpi_nfit_init); 2752 2753 struct acpi_nfit_flush_work { 2754 struct work_struct work; 2755 struct completion cmp; 2756 }; 2757 2758 static void flush_probe(struct work_struct *work) 2759 { 2760 struct acpi_nfit_flush_work *flush; 2761 2762 flush = container_of(work, typeof(*flush), work); 2763 complete(&flush->cmp); 2764 } 2765 2766 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 2767 { 2768 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 2769 struct device *dev = acpi_desc->dev; 2770 struct acpi_nfit_flush_work flush; 2771 int rc; 2772 2773 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 2774 device_lock(dev); 2775 device_unlock(dev); 2776 2777 /* bounce the init_mutex to make init_complete valid */ 2778 mutex_lock(&acpi_desc->init_mutex); 2779 if (acpi_desc->cancel || acpi_desc->init_complete) { 2780 mutex_unlock(&acpi_desc->init_mutex); 2781 return 0; 2782 } 2783 2784 /* 2785 * Scrub work could take 10s of seconds, userspace may give up so we 2786 * need to be interruptible while 
waiting.
2787 */
2788 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2789 init_completion(&flush.cmp);
2790 queue_work(nfit_wq, &flush.work);
2791 mutex_unlock(&acpi_desc->init_mutex);
2792
2793 rc = wait_for_completion_interruptible(&flush.cmp);
2794 cancel_work_sync(&flush.work);
2795 return rc;
2796 }
2797
2798 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2799 struct nvdimm *nvdimm, unsigned int cmd)
2800 {
2801 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2802
2803 if (nvdimm)
2804 return 0;
2805 if (cmd != ND_CMD_ARS_START)
2806 return 0;
2807
2808 /*
2809 * The kernel and userspace may race to initiate a scrub, but
2810 * the scrub thread is prepared to lose that initial race. It
2811 * just needs guarantees that any ars it initiates are not
2812 * interrupted by any intervening start requests from userspace.
2813 */
2814 if (work_busy(&acpi_desc->work))
2815 return -EBUSY;
2816
2817 return 0;
2818 }
2819
2820 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
2821 {
2822 struct device *dev = acpi_desc->dev;
2823 struct nfit_spa *nfit_spa;
2824
2825 if (work_busy(&acpi_desc->work))
2826 return -EBUSY;
2827
2828 mutex_lock(&acpi_desc->init_mutex);
2829 if (acpi_desc->cancel) {
2830 mutex_unlock(&acpi_desc->init_mutex);
2831 return 0;
2832 }
2833
2834 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2835 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2836
2837 if (nfit_spa_type(spa) != NFIT_SPA_PM)
2838 continue;
2839
2840 nfit_spa->ars_required = 1;
2841 }
2842 queue_work(nfit_wq, &acpi_desc->work);
2843 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
2844 mutex_unlock(&acpi_desc->init_mutex);
2845
2846 return 0;
2847 }
2848
2849 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2850 {
2851 struct nvdimm_bus_descriptor *nd_desc;
2852
2853 dev_set_drvdata(dev, acpi_desc);
2854 acpi_desc->dev = dev;
2855 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
2856 nd_desc = &acpi_desc->nd_desc;
2857 nd_desc->provider_name = "ACPI.NFIT";
2858 nd_desc->module = THIS_MODULE;
2859 nd_desc->ndctl = acpi_nfit_ctl;
2860 nd_desc->flush_probe = acpi_nfit_flush_probe;
2861 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
2862 nd_desc->attr_groups = acpi_nfit_attribute_groups;
2863
2864 INIT_LIST_HEAD(&acpi_desc->spas);
2865 INIT_LIST_HEAD(&acpi_desc->dcrs);
2866 INIT_LIST_HEAD(&acpi_desc->bdws);
2867 INIT_LIST_HEAD(&acpi_desc->idts);
2868 INIT_LIST_HEAD(&acpi_desc->flushes);
2869 INIT_LIST_HEAD(&acpi_desc->memdevs);
2870 INIT_LIST_HEAD(&acpi_desc->dimms);
2871 INIT_LIST_HEAD(&acpi_desc->list);
2872 mutex_init(&acpi_desc->init_mutex);
2873 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
2874 }
2875 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
2876
2877 static void acpi_nfit_put_table(void *table)
2878 {
2879 acpi_put_table(table);
2880 }
2881
2882 void acpi_nfit_shutdown(void *data)
2883 {
2884 struct acpi_nfit_desc *acpi_desc = data;
2885 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2886
2887 /*
2888 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
2889 * race teardown
2890 */
2891 mutex_lock(&acpi_desc_lock);
2892 list_del(&acpi_desc->list);
2893 mutex_unlock(&acpi_desc_lock);
2894
2895 mutex_lock(&acpi_desc->init_mutex);
2896 acpi_desc->cancel = 1;
2897 mutex_unlock(&acpi_desc->init_mutex);
2898
2899 /*
2900 * Bounce the nvdimm bus lock to make sure any in-flight
2901 * acpi_nfit_ars_rescan() submissions have had a chance to
2902 * either submit or see
->cancel set. 2903 */ 2904 device_lock(bus_dev); 2905 device_unlock(bus_dev); 2906 2907 flush_workqueue(nfit_wq); 2908 } 2909 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 2910 2911 static int acpi_nfit_add(struct acpi_device *adev) 2912 { 2913 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 2914 struct acpi_nfit_desc *acpi_desc; 2915 struct device *dev = &adev->dev; 2916 struct acpi_table_header *tbl; 2917 acpi_status status = AE_OK; 2918 acpi_size sz; 2919 int rc = 0; 2920 2921 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 2922 if (ACPI_FAILURE(status)) { 2923 /* This is ok, we could have an nvdimm hotplugged later */ 2924 dev_dbg(dev, "failed to find NFIT at startup\n"); 2925 return 0; 2926 } 2927 2928 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 2929 if (rc) 2930 return rc; 2931 sz = tbl->length; 2932 2933 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 2934 if (!acpi_desc) 2935 return -ENOMEM; 2936 acpi_nfit_desc_init(acpi_desc, &adev->dev); 2937 2938 /* Save the acpi header for exporting the revision via sysfs */ 2939 acpi_desc->acpi_header = *tbl; 2940 2941 /* Evaluate _FIT and override with that if present */ 2942 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 2943 if (ACPI_SUCCESS(status) && buf.length > 0) { 2944 union acpi_object *obj = buf.pointer; 2945 2946 if (obj->type == ACPI_TYPE_BUFFER) 2947 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 2948 obj->buffer.length); 2949 else 2950 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", 2951 __func__, (int) obj->type); 2952 kfree(buf.pointer); 2953 } else 2954 /* skip over the lead-in header table */ 2955 rc = acpi_nfit_init(acpi_desc, (void *) tbl 2956 + sizeof(struct acpi_table_nfit), 2957 sz - sizeof(struct acpi_table_nfit)); 2958 2959 if (rc) 2960 return rc; 2961 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 2962 } 2963 2964 static int acpi_nfit_remove(struct acpi_device *adev) 2965 { 2966 /* see acpi_nfit_unregister */ 2967 return 0; 2968 } 2969 2970 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 2971 { 2972 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 2973 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 2974 union acpi_object *obj; 2975 acpi_status status; 2976 int ret; 2977 2978 dev_dbg(dev, "%s: event: %d\n", __func__, event); 2979 2980 if (event != NFIT_NOTIFY_UPDATE) 2981 return; 2982 2983 if (!dev->driver) { 2984 /* dev->driver may be null if we're being removed */ 2985 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 2986 return; 2987 } 2988 2989 if (!acpi_desc) { 2990 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 2991 if (!acpi_desc) 2992 return; 2993 acpi_nfit_desc_init(acpi_desc, dev); 2994 } else { 2995 /* 2996 * Finish previous registration before considering new 2997 * regions. 
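 * The flush_workqueue() below drains the scrub/registration work
 * queued by that earlier pass before the updated NFIT is parsed.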
2998 */ 2999 flush_workqueue(nfit_wq); 3000 } 3001 3002 /* Evaluate _FIT */ 3003 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3004 if (ACPI_FAILURE(status)) { 3005 dev_err(dev, "failed to evaluate _FIT\n"); 3006 return; 3007 } 3008 3009 obj = buf.pointer; 3010 if (obj->type == ACPI_TYPE_BUFFER) { 3011 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3012 obj->buffer.length); 3013 if (ret) 3014 dev_err(dev, "failed to merge updated NFIT\n"); 3015 } else 3016 dev_err(dev, "Invalid _FIT\n"); 3017 kfree(buf.pointer); 3018 } 3019 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3020 3021 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3022 { 3023 device_lock(&adev->dev); 3024 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3025 device_unlock(&adev->dev); 3026 } 3027 3028 static const struct acpi_device_id acpi_nfit_ids[] = { 3029 { "ACPI0012", 0 }, 3030 { "", 0 }, 3031 }; 3032 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3033 3034 static struct acpi_driver acpi_nfit_driver = { 3035 .name = KBUILD_MODNAME, 3036 .ids = acpi_nfit_ids, 3037 .ops = { 3038 .add = acpi_nfit_add, 3039 .remove = acpi_nfit_remove, 3040 .notify = acpi_nfit_notify, 3041 }, 3042 }; 3043 3044 static __init int nfit_init(void) 3045 { 3046 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3047 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3048 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3049 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3050 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3051 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3052 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3053 3054 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); 3055 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); 3056 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); 3057 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); 3058 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); 3059 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); 3060 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); 3061 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); 3062 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); 3063 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 3064 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3065 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3066 acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3067 3068 nfit_wq = create_singlethread_workqueue("nfit"); 3069 if (!nfit_wq) 3070 return -ENOMEM; 3071 3072 nfit_mce_register(); 3073 3074 return acpi_bus_register_driver(&acpi_nfit_driver); 3075 } 3076 3077 static __exit void nfit_exit(void) 3078 { 3079 nfit_mce_unregister(); 3080 acpi_bus_unregister_driver(&acpi_nfit_driver); 3081 destroy_workqueue(nfit_wq); 3082 WARN_ON(!list_empty(&acpi_descs)); 3083 } 3084 3085 module_init(nfit_init); 3086 module_exit(nfit_exit); 3087 MODULE_LICENSE("GPL v2"); 3088 MODULE_AUTHOR("Intel Corporation"); 3089
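
/*
 * Usage sketch (illustrative, not part of the ABI): once the nvdimm bus is
 * registered, acpi_nfit_ars_rescan() is reachable from userspace via the
 * bus's "scrub" attribute in the nfit sysfs group, so a fresh address range
 * scrub can be requested with something like:
 *
 *	# echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 *
 * The bus instance name (ndbus0 here) and the accepted input are system
 * dependent.
 */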