// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
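/*
 * Illustrative sketch (not part of this file): a region setup path
 * that shares one page of provider scratch space across regions might
 * call devm_nvdimm_memremap() as below; 'ctrl_phys' is a hypothetical
 * physical address supplied by the bus provider:
 *
 *	void *ctrl = devm_nvdimm_memremap(dev, ctrl_phys, PAGE_SIZE,
 *			MEMREMAP_WB);
 *
 *	if (!ctrl)
 *		return -ENXIO;
 *
 * Repeat callers that pass the same @offset get the same refcounted
 * mapping, and the devm action drops the reference automatically when
 * @dev is unbound, so no explicit unmap is needed.
 */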
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
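/*
 * Worked example (illustrative sketch, field and helper names are
 * stand-ins for the label code): a Fletcher-64 style checksum is
 * computed over a namespace label with its checksum field zeroed, and
 * the result is then stored back:
 *
 *	u64 sum;
 *
 *	nd_label->checksum = 0;
 *	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
 *	nd_label->checksum = cpu_to_le64(sum);
 *
 * Passing le=1 treats the buffer as an array of little-endian 32-bit
 * words, which keeps the sum stable across host endianness.
 */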
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len)
{
	uuid_t uuid;
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = uuid_parse(buf, &uuid);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}
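/*
 * Illustrative wiring (hypothetical driver and field names): a
 * 'sector_size' style attribute built on the two helpers above.
 * Attribute stores in libnvdimm take the device_lock() (enforcing the
 * driver-detached rule checked above) and the bus lock around the
 * update:
 *
 *	static const unsigned long foo_lbasize_supported[] = {
 *		512, 4096, 0 };
 *
 *	static ssize_t sector_size_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct foo_dev *foo = to_foo_dev(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		nvdimm_bus_lock(dev);
 *		rc = nd_size_select_store(dev, buf, &foo->lbasize,
 *				foo_lbasize_supported);
 *		nvdimm_bus_unlock(dev);
 *		device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 *
 * Note the 'supported' array is zero-terminated, matching the loop
 * conditions in both helpers.
 */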
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

/*
 * Cycling the device_lock() of each child flushes any in-flight probe
 * work, so that wait_probe_show() returns only after probing settles.
 */
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};

static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}

static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}

static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}

	if (rc == 0)
		rc = len;
	return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);

static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no ops
	 * detected, or a negative capability is indicated.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	cap = nd_desc->fw_ops->capability(nd_desc);
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};
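/*
 * Illustrative provider sketch (not from this file): a bus provider
 * advertises firmware activation by populating struct
 * nvdimm_bus_fw_ops and pointing nd_desc->fw_ops at it before
 * registering the bus. The 'foo' names and callback bodies below are
 * placeholders:
 *
 *	static enum nvdimm_fwa_state foo_activate_state(
 *			struct nvdimm_bus_descriptor *nd_desc)
 *	{
 *		return NVDIMM_FWA_ARMED;
 *	}
 *
 *	static enum nvdimm_fwa_capability foo_capability(
 *			struct nvdimm_bus_descriptor *nd_desc)
 *	{
 *		return NVDIMM_FWA_CAP_QUIESCE;
 *	}
 *
 *	static int foo_activate(struct nvdimm_bus_descriptor *nd_desc)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct nvdimm_bus_fw_ops foo_fw_ops = {
 *		.activate_state = foo_activate_state,
 *		.capability = foo_capability,
 *		.activate = foo_activate,
 *	};
 *
 * With such ops in place, userspace drives activation through the
 * 'firmware/activate' and 'firmware/capability' attributes of the
 * ndbusX device; writing "quiesce" routes the activation through
 * hibernate_quiet_exec() per activate_store() above.
 */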
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);