// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

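/*
 * The optional "wp" GPIO above is a write-protect line: it is requested in
 * the asserted state (GPIOD_OUT_HIGH) at registration time, and
 * nvmem_reg_write() deasserts it only for the duration of a write, so the
 * backing storage remains protected at all other times.
 */
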
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

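/*
 * The helper above yields, for example: 0644 for a writable device, 0444
 * for a read-only one, and 0600/0400 for the root_only variants. Write
 * access is also dropped whenever no reg_write hook is provided, and read
 * access whenever no reg_read hook is provided.
 */
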
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

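/*
 * Worked example for the nbits handling above: a cell with bit_offset = 2
 * and nbits = 11 occupies bits 2..12 of its byte range, so its size is
 * recomputed as DIV_ROUND_UP(11 + 2, 8) = 2 bytes, regardless of the
 * bytes value supplied in the nvmem_cell_info.
 */
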
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

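/*
 * nvmem_add_cells_from_of() below walks the child nodes of the provider's
 * device tree node. An illustrative (hypothetical) fragment of the sort of
 * binding it parses:
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		mac_address: mac-address@90 {
 *			reg = <0x90 0x6>;
 *		};
 *
 *		board_rev: board-rev@a0 {
 *			reg = <0xa0 0x2>;
 *			bits = <2 11>;
 *		};
 *	};
 *
 * Each child's "reg" gives <offset length> in bytes; an optional "bits"
 * property gives <bit_offset nbits> within that byte range.
 */
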
static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Set the id before the wp_gpio error path below releases it. */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_simple_remove(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

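/*
 * Minimal provider-side sketch (illustrative only; the "foo" driver, its
 * shadow buffer and foo_reg_read() are hypothetical):
 *
 *	struct foo {
 *		u8 shadow[1024];
 *	};
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config config = { };
 *		struct nvmem_device *nvmem;
 *		struct foo *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		config.dev = &pdev->dev;
 *		config.name = "foo-nvmem";
 *		config.id = NVMEM_DEVID_AUTO;
 *		config.size = sizeof(foo->shadow);
 *		config.word_size = 1;
 *		config.stride = 1;
 *		config.reg_read = foo_reg_read;
 *		config.priv = foo;
 *
 *		nvmem = devm_nvmem_register(&pdev->dev, &config);
 *		return PTR_ERR_OR_ZERO(nvmem);
 *	}
 *
 * Leaving reg_write unset makes the device read-only, see the
 * nvmem->read_only computation above.
 */
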
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for device %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * requesting device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

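/*
 * Non-DT platforms describe cells with lookup entries. A board-file sketch
 * matching the code above (all names hypothetical):
 *
 *	static struct nvmem_cell_lookup board_cell_lookups[] = {
 *		{
 *			.nvmem_name = "foo-nvmem0",
 *			.cell_name = "mac-address",
 *			.dev_id = "fec1",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_cell_lookups,
 *			       ARRAY_SIZE(board_cell_lookups));
 *
 * A later nvmem_cell_get(dev, "mac-address") from the device named "fec1"
 * then resolves to the "mac-address" cell on the "foo-nvmem0" device.
 */
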
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

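/*
 * Minimal consumer-side sketch (illustrative; assumes a cell named
 * "mac-address" is described for @dev via DT or a lookup entry):
 *
 *	struct nvmem_cell *cell;
 *	void *value;
 *	size_t len;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	value = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(value))
 *		return PTR_ERR(value);
 *
 *	// ... use len bytes at value ...
 *	kfree(value);
 */
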
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

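/*
 * Worked example for the in-place shift above: a cell with bit_offset = 3
 * and nbits = 7 spans two bytes. The first byte is shifted right by 3, the
 * low bits of the second byte are folded into its top, the now-unused
 * trailing byte is zeroed, and GENMASK(6, 0) clears the stray top bit of
 * the result.
 */
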
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

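/*
 * Note that for cells that do not start and end on byte boundaries the
 * helper above performs a read-modify-write: the bits surrounding the cell
 * are read back from the device so that the subsequent full-byte write in
 * nvmem_cell_write() preserves them.
 */
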
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

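/*
 * Typed-read usage sketch (illustrative; assumes a 4-byte "calibration"
 * cell is described for @dev):
 *
 *	u32 cal;
 *	int err;
 *
 *	err = nvmem_cell_read_u32(dev, "calibration", &cal);
 *	if (err)
 *		return err;
 *
 * The read fails with -EINVAL if the cell size does not match the
 * requested integer width.
 */
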
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

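/*
 * Cell-table usage sketch (illustrative; names hypothetical). The
 * nvmem_name must match the registered device name, as compared in
 * nvmem_add_cells_from_table():
 *
 *	static const struct nvmem_cell_info board_cells[] = {
 *		{
 *			.name = "serial-number",
 *			.offset = 0x10,
 *			.bytes = 8,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cells = board_cells,
 *		.ncells = ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);
 */
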
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");