// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
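
/*
 * Illustrative sketch (not part of the original file): the helpers above
 * implement the bus-level reconfiguration lock.  A typical caller brackets
 * a topology change like so:
 *
 *	nvdimm_bus_lock(dev);
 *	...add or remove shared mappings, reconfigure regions...
 *	nvdimm_bus_unlock(dev);
 *
 * is_nvdimm_bus_locked() only backs assertions such as the dev_WARN_ONCE()
 * in alloc_nvdimm_map() below; since mutex_is_locked() reports whether
 * *anyone* holds the mutex, not the current thread, it is a debug aid
 * rather than a substitute for taking the lock.
 */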

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
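
/*
 * Example (illustrative only; 'res' and the calling driver are
 * hypothetical): a bus-provider driver mapping a control region that may
 * be shared with a sibling region would do:
 *
 *	void *base = devm_nvdimm_memremap(dev, res->start,
 *			resource_size(res), ARCH_MEMREMAP_PMEM);
 *	if (!base)
 *		return -ENOMEM;
 *
 * Repeat callers for the same @offset share one mapping via the kref,
 * passing @flags == 0 yields an ioremap()'d __iomem mapping instead, and
 * the devm action drops the reference automatically on driver detach.
 */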

u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
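
/*
 * Worked example (for illustration): nd_fletcher64() runs two accumulators
 * over the 32-bit words of the buffer, with lo32 summing the words and
 * hi32 summing the successive values of lo32.  For the two words {1, 2}:
 * after word 0, lo32 = 1 and hi32 = 1; after word 1, lo32 = 3 and
 * hi32 = 4; the result is (4ULL << 32) | 3.
 */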

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the raw sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects nd_device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len)
{
	uuid_t uuid;
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = uuid_parse(buf, &uuid);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};

static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}

static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}

static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}
	nvdimm_bus_unlock(dev);

	if (rc == 0)
		rc = len;
	return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);

static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no ops
	 * detected, or a negative capability is indicated.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};
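
/*
 * Example (illustrative only; 'ndbus0' is a sample bus name): the
 * "firmware" group above surfaces activation through sysfs, e.g.:
 *
 *	# cat /sys/bus/nd/devices/ndbus0/firmware/capability
 *	quiesce
 *	# echo quiesce > /sys/bus/nd/devices/ndbus0/firmware/activate
 *
 * Writing "quiesce" runs the activation from hibernate_quiet_exec() so
 * that devices are frozen across the firmware switch, while "live"
 * attempts the activation without quiescing devices.
 */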
483 */ 484 if (!nd_desc->fw_ops) 485 return 0; 486 487 nvdimm_bus_lock(dev); 488 cap = nd_desc->fw_ops->capability(nd_desc); 489 nvdimm_bus_unlock(dev); 490 491 if (cap < NVDIMM_FWA_CAP_QUIESCE) 492 return 0; 493 494 return a->mode; 495 } 496 static struct attribute *nvdimm_bus_firmware_attributes[] = { 497 &dev_attr_activate.attr, 498 &dev_attr_capability.attr, 499 NULL, 500 }; 501 502 static const struct attribute_group nvdimm_bus_firmware_attribute_group = { 503 .name = "firmware", 504 .attrs = nvdimm_bus_firmware_attributes, 505 .is_visible = nvdimm_bus_firmware_visible, 506 }; 507 508 const struct attribute_group *nvdimm_bus_attribute_groups[] = { 509 &nvdimm_bus_attribute_group, 510 &nvdimm_bus_firmware_attribute_group, 511 NULL, 512 }; 513 514 int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) 515 { 516 return badrange_add(&nvdimm_bus->badrange, addr, length); 517 } 518 EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange); 519 520 #ifdef CONFIG_BLK_DEV_INTEGRITY 521 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) 522 { 523 struct blk_integrity bi; 524 525 if (meta_size == 0) 526 return 0; 527 528 memset(&bi, 0, sizeof(bi)); 529 530 bi.tuple_size = meta_size; 531 bi.tag_size = meta_size; 532 533 blk_integrity_register(disk, &bi); 534 blk_queue_max_integrity_segments(disk->queue, 1); 535 536 return 0; 537 } 538 EXPORT_SYMBOL(nd_integrity_init); 539 540 #else /* CONFIG_BLK_DEV_INTEGRITY */ 541 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) 542 { 543 return 0; 544 } 545 EXPORT_SYMBOL(nd_integrity_init); 546 547 #endif 548 549 static __init int libnvdimm_init(void) 550 { 551 int rc; 552 553 rc = nvdimm_bus_init(); 554 if (rc) 555 return rc; 556 rc = nvdimm_init(); 557 if (rc) 558 goto err_dimm; 559 rc = nd_region_init(); 560 if (rc) 561 goto err_region; 562 563 nd_label_init(); 564 565 return 0; 566 err_region: 567 nvdimm_exit(); 568 err_dimm: 569 nvdimm_bus_exit(); 570 return rc; 571 } 572 573 static __exit void libnvdimm_exit(void) 574 { 575 WARN_ON(!list_empty(&nvdimm_bus_list)); 576 nd_region_exit(); 577 nvdimm_exit(); 578 nvdimm_bus_exit(); 579 nvdimm_devs_exit(); 580 } 581 582 MODULE_LICENSE("GPL v2"); 583 MODULE_AUTHOR("Intel Corporation"); 584 subsys_initcall(libnvdimm_init); 585 module_exit(libnvdimm_exit); 586