// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	struct gpio_desc	*wp_gpio;
	void			*priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}
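/*
 * Example: a minimal provider read callback with the nvmem_reg_read_t
 * shape consumed above.  Illustrative sketch only; struct foo and its
 * shadow buffer are hypothetical and not part of this file.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 */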
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
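/*
 * For reference, the modes the helper above yields, assuming the device
 * has both reg_read and reg_write callbacks:
 *
 *	root_only	read_only	mode
 *	    0		    0		0644
 *	    0		    1		0444
 *	    1		    0		0600
 *	    1		    1		0400
 */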
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, for backwards compatibility with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					const struct nvmem_cell_info *info,
					struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				const struct nvmem_cell_info *info,
				struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}
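/*
 * Example: a cell description as a provider driver might pass via
 * nvmem_config->cells.  Sketch only; the "mac-address" cell and its
 * layout are hypothetical.
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 */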
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 on success or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
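/*
 * Example: subscribing to nvmem events.  Sketch only; foo_nvmem_notify()
 * is hypothetical.  The callback receives one of the nvmem_events actions
 * (NVMEM_ADD, NVMEM_REMOVE, NVMEM_CELL_ADD or NVMEM_CELL_REMOVE).
 *
 *	static int foo_nvmem_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		if (action == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_notify,
 *	};
 *
 *	...
 *	nvmem_register_notifier(&foo_nvmem_nb);
 */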
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
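/*
 * For reference, the device tree layout nvmem_add_cells_from_of() parses:
 * each child node of the provider describes one cell, "reg" giving the
 * byte offset and length, and the optional "bits" property giving the bit
 * offset (0-7) and number of bits.  Hypothetical example:
 *
 *	eeprom@52 {
 *		...
 *		calib: calib@10 {
 *			reg = <0x10 0x2>;
 *			bits = <4 10>;
 *		};
 *	};
 */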
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
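/*
 * Example: registering a provider from a driver probe routine.  Sketch
 * only; foo, foo_reg_read() and foo_reg_write() are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config config = {
 *			.dev		= &pdev->dev,
 *			.name		= "foo",
 *			.id		= NVMEM_DEVID_AUTO,
 *			.owner		= THIS_MODULE,
 *			.type		= NVMEM_TYPE_EEPROM,
 *			.size		= 256,
 *			.word_size	= 1,
 *			.stride		= 1,
 *			.priv		= foo,
 *			.reg_read	= foo_reg_read,
 *			.reg_write	= foo_reg_write,
 *		};
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev,
 *							   &config));
 *	}
 */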
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
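/*
 * For reference, the consumer-side device tree properties that
 * of_nvmem_device_get() looks at.  Hypothetical example:
 *
 *	foo {
 *		...
 *		nvmem = <&eeprom>;
 *		nvmem-names = "calib";
 *	};
 *
 * of_nvmem_device_get(np, "calib") then resolves to the nvmem device
 * registered for &eeprom.
 */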
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem device will be released automatically once the
 * consumer device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
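/*
 * Example: a consumer holding a whole nvmem device for its lifetime.
 * Sketch only; the "foo" id and buf are hypothetical.
 *
 *	nvmem = devm_nvmem_device_get(dev, "foo");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	rc = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
 */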
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}
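/*
 * Example: a board file wiring a cell to a consumer device on a non-DT
 * system, matched by the dev_id/con_id pair checked above.  Sketch only;
 * all names are hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "foo-mac.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	...
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */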
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed automatically
 * once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
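/*
 * Worked example for the shift above (illustrative): a cell with
 * bit_offset = 2 and nbits = 3 occupies a single byte, since
 * DIV_ROUND_UP(3 + 2, 8) == 1.  If the raw byte read back is 0b01011100,
 * the first shift gives 0b00010111 and the final mask with GENMASK(2, 0)
 * leaves 0b00000111: the three bits of interest, right-aligned.
 */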
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success.  The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
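/*
 * Example: typical consumer usage of the cell API.  Sketch only; the
 * "mac-address" cell id is hypothetical.
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	buf = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */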
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the write does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
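/*
 * Example: reading a small calibration word with the typed helpers.
 * Sketch only; the "calib" cell id is hypothetical.
 *
 *	u32 calib;
 *	int err = nvmem_cell_read_u32(dev, "calib", &calib);
 *
 *	if (err)
 *		return err;
 */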
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
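/*
 * Example: registering a cell table from machine code so the cells are
 * attached when the matching provider registers.  Sketch only; names are
 * hypothetical and foo_cells is an array of struct nvmem_cell_info, as in
 * the earlier sketch.
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	...
 *	nvmem_add_cell_table(&foo_cell_table);
 */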
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");