// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to stay backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
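
/*
 * A minimal sketch of how a consumer might use the notifier API above;
 * illustrative only ("foo_" names are hypothetical, not part of this file).
 * The callback receives the events raised by this core (NVMEM_ADD,
 * NVMEM_REMOVE, NVMEM_CELL_ADD, NVMEM_CELL_REMOVE) with the affected nvmem
 * device or cell as payload:
 *
 *	static int foo_nvmem_notify(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_notify,
 *	};
 *
 *	err = nvmem_register_notifier(&foo_nb);
 */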

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Assign the id before any error path can call nvmem_release(). */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_simple_remove(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
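
/*
 * A minimal provider sketch for the registration API above; illustrative
 * only ("foo_" names and FOO_NVMEM_SIZE are hypothetical). A driver fills
 * in a struct nvmem_config with its read/write callbacks and registers it
 * from probe:
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static struct nvmem_config foo_nvmem_config = {
 *		.name = "foo-nvmem",
 *		.read_only = true,
 *		.word_size = 1,
 *		.stride = 1,
 *		.reg_read = foo_reg_read,
 *	};
 *
 *	foo_nvmem_config.dev = &pdev->dev;
 *	foo_nvmem_config.priv = foo;
 *	foo_nvmem_config.size = FOO_NVMEM_SIZE;
 *	nvmem = devm_nvmem_register(&pdev->dev, &foo_nvmem_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */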

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
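
/*
 * A minimal consumer sketch for the device-level API above; illustrative
 * only (the variable names are hypothetical). With a DT property such as
 * "nvmem = <&eeprom>;" a driver can look up the whole device and read raw
 * bytes from it (nvmem_device_read() is defined further down in this file):
 *
 *	struct nvmem_device *nvmem;
 *	u8 id[4];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, NULL);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(id), id);
 *	nvmem_device_put(nvmem);
 *	if (ret < 0)
 *		return ret;
 */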

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
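
/*
 * An illustrative device-tree fragment for the OF lookup above (node names
 * other than the standard nvmem-* bindings are hypothetical). The provider
 * describes cells as child nodes; a consumer references them through the
 * "nvmem-cells"/"nvmem-cell-names" properties:
 *
 *	eeprom: eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		mac_address: mac-address@90 {
 *			reg = <0x90 0x6>;
 *		};
 *	};
 *
 *	ethernet {
 *		nvmem-cells = <&mac_address>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * of_nvmem_cell_get(np, "mac-address") then resolves the phandle, finds the
 * cell parsed by nvmem_add_cells_from_of() and takes a reference on the
 * owning nvmem device.
 */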

/**
 * nvmem_cell_get() - Get an nvmem cell from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
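
/*
 * A minimal consumer sketch for the cell API above; illustrative only
 * (the "mac-address" cell name matches the DT example earlier in this
 * file; ndev is a hypothetical net_device). The returned buffer is
 * cell->bytes long and must be freed by the caller:
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	if (len == ETH_ALEN)
 *		ether_addr_copy(ndev->dev_addr, mac);
 *	kfree(mac);
 */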

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
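
/*
 * A minimal sketch of the fixed-width helpers above; illustrative only
 * (the "calibration" cell name is hypothetical). The helpers get the cell,
 * check that its size matches the requested type exactly and put it again,
 * so no cleanup is needed on the caller's side:
 *
 *	u32 cal;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "calibration", &cal);
 *	if (ret)
 *		return ret;
 */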

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
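
/*
 * A minimal sketch of non-DT wiring with the lookup API above; illustrative
 * only (the device, nvmem and cell names are hypothetical). Board code
 * registers a lookup entry binding a provider's cell to a consumer device
 * and con_id, which nvmem_cell_get_from_lookup() then resolves when that
 * consumer calls nvmem_cell_get(dev, "mac-address"):
 *
 *	static struct nvmem_cell_lookup foo_cell_lookup = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cell_name = "mac-address",
 *		.dev_id = "foo-eth.0",
 *		.con_id = "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_cell_lookup, 1);
 */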

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");