// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

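/* devm action: drop the shared mapping's reference under the bus lock */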
static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);

u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects nd_device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

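/*
 * Shared sysfs helpers for attributes that select one value from a fixed,
 * zero-terminated list of supported sizes (e.g. sector sizes); the current
 * selection is shown in brackets.
 */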
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};

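/*
 * "firmware/capability": report whether platform firmware activation
 * requires quiescing the system ("quiesce") or can be done live ("live").
 */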
static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}

static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}

static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}
	nvdimm_bus_unlock(dev);

	if (rc == 0)
		rc = len;
	return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);

static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no ops
	 * detected, or a negative capability is indicated.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}
static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};

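/* Record a range of poisoned media addresses against the bus */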
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);