// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	const struct nvmem_keepout *keepout;
	unsigned int nkeepout;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	nvmem_cell_post_process_t cell_post_process;
	struct gpio_desc *wp_gpio;
	struct nvmem_layout *layout;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char *id;
	int index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static DEFINE_SPINLOCK(nvmem_layout_lock);
static LIST_HEAD(nvmem_layouts);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
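
/*
 * Illustrative sketch (not part of the original file): a provider that
 * wants bytes 0x10-0x1f masked out would pass a keepout table through its
 * nvmem_config; reads of that range then return .value instead of data.
 * The "foo" names are hypothetical.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */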

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}
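
/*
 * Illustrative sketch (not part of the original file): how a provider or
 * layout driver might describe and add a single cell at runtime through
 * the API above. The cell name and geometry are hypothetical.
 *
 *	struct nvmem_cell_info info = {
 *		.name = "serial-number",
 *		.offset = 0x40,
 *		.bytes = 8,
 *	};
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 */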

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(dev->of_node, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
		}

		info.np = of_node_get(child);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
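
/*
 * Illustrative device-tree sketch (not part of the original file): the
 * child-node shape nvmem_add_cells_from_of() parses above. "reg" gives the
 * cell offset and byte length, the optional "bits" property gives the bit
 * offset and bit width. Node and cell names are hypothetical.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		calibration: calibration@4 {
 *			reg = <0x4 0x2>;	// offset, bytes
 *			bits = <2 10>;		// bit_offset, nbits
 *		};
 *	};
 */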

int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
	layout->owner = owner;

	spin_lock(&nvmem_layout_lock);
	list_add(&layout->node, &nvmem_layouts);
	spin_unlock(&nvmem_layout_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	spin_lock(&nvmem_layout_lock);
	list_del(&layout->node);
	spin_unlock(&nvmem_layout_lock);
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
	struct device_node *layout_np, *np = nvmem->dev.of_node;
	struct nvmem_layout *l, *layout = NULL;

	layout_np = of_get_child_by_name(np, "nvmem-layout");
	if (!layout_np)
		return NULL;

	spin_lock(&nvmem_layout_lock);

	list_for_each_entry(l, &nvmem_layouts, node) {
		if (of_match_node(l->of_match_table, layout_np)) {
			if (try_module_get(l->owner))
				layout = l;

			break;
		}
	}

	spin_unlock(&nvmem_layout_lock);
	of_node_put(layout_np);

	return layout;
}

static void nvmem_layout_put(struct nvmem_layout *layout)
{
	if (layout)
		module_put(layout->owner);
}

static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
{
	struct nvmem_layout *layout = nvmem->layout;
	int ret;

	if (layout && layout->add_cells) {
		ret = layout->add_cells(&nvmem->dev, nvmem, layout);
		if (ret)
			return ret;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_layout_get_container() - Get OF node of the layout container.
 *
 * @nvmem: nvmem device.
 *
 * Return: a node pointer with refcount incremented or NULL if no
 * container exists. Use of_node_put() on it when done.
 */
struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
}
EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
#endif

const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
					struct nvmem_layout *layout)
{
	struct device_node *layout_np;
	const struct of_device_id *match;

	layout_np = of_nvmem_layout_get_container(nvmem);
	match = of_match_node(layout->of_match_table, layout_np);
	of_node_put(layout_np);

	return match ? match->data : NULL;
}
EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
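
/*
 * Illustrative sketch (not part of the original file): the rough shape of
 * a minimal layout driver built on the registration API above. All names
 * are hypothetical, and the nvmem_layout_register() wrapper around
 * __nvmem_layout_register() is assumed to come from nvmem-provider.h.
 *
 *	static int foo_add_cells(struct device *dev, struct nvmem_device *nvmem,
 *				 struct nvmem_layout *layout)
 *	{
 *		// parse the device content, then nvmem_add_one_cell() per cell
 *		return 0;
 *	}
 *
 *	static const struct of_device_id foo_of_match_table[] = {
 *		{ .compatible = "foo,layout" },
 *		{},
 *	};
 *
 *	static struct nvmem_layout foo_layout = {
 *		.name = "foo-layout",
 *		.of_match_table = foo_of_match_table,
 *		.add_cells = foo_add_cells,
 *	};
 */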

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->cell_post_process = config->cell_post_process;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	/*
	 * If the driver supplied a layout by config->layout, the module
	 * pointer will be NULL and nvmem_layout_put() will be a noop.
	 */
	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
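
/*
 * Illustrative provider sketch (not part of the original file): a minimal
 * nvmem_config for a 256-byte, byte-addressable device backed by a driver
 * read callback. All "foo" names are hypothetical.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		// copy bytes from the hardware into val
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = &pdev->dev,
 *		.name = "foo-nvmem",
 *		.id = NVMEM_DEVID_AUTO,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 1,
 *		.priv = foo,		// passed back to foo_reg_read()
 *		.reg_read = foo_reg_read,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 */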

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
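
/*
 * Illustrative device-tree sketch (not part of the original file): a
 * consumer referencing a whole nvmem device by name, as resolved by
 * of_nvmem_device_get() above. Labels and names are hypothetical.
 *
 *	foo_eeprom: eeprom@52 { ... };
 *
 *	consumer {
 *		nvmem = <&foo_eeprom>;
 *		nvmem-names = "config-store";
 *	};
 *
 * A driver would then call nvmem_device_get(dev, "config-store").
 */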

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1) {
		of_node_put(cell_spec.np);
		return ERR_PTR(-EINVAL);
	}

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
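
/*
 * Illustrative device-tree sketch (not part of the original file): a
 * consumer referencing a cell by name, as resolved by of_nvmem_cell_get()
 * above. Labels and names are hypothetical.
 *
 *	consumer {
 *		nvmem-cells = <&mac_address>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * A driver would then call nvmem_cell_get(dev, "mac-address").
 */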

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
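
/*
 * Worked example for the shift above (illustrative, not from the original
 * file): a cell with bit_offset = 2 and nbits = 10 has
 * bytes = DIV_ROUND_UP(12, 8) = 2. Raw bytes 0xb4, 0x06 become:
 *
 *	buf[0] = (0xb4 >> 2) | ((0x06 << 6) & 0xff) = 0xad
 *	buf[1] = (0x06 >> 2) & GENMASK(1, 0)        = 0x01
 *
 * i.e. the 10-bit value 0x1ad, right-justified in the buffer.
 */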

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (nvmem->cell_post_process) {
		rc = nvmem->cell_post_process(nvmem->priv, id, index,
					      cell->offset, buf, cell->bytes);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
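
/*
 * Illustrative consumer sketch (not part of the original file): fetching
 * and reading a cell, then releasing both the buffer and the cell. The
 * cell name "mac-address" is hypothetical.
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "mac-address");
 *	size_t len;
 *	u8 *mac;
 *
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	// ... use len bytes of mac ...
 *	kfree(mac);
 */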

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the write does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
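
/*
 * Illustrative consumer sketch (not part of the original file): the
 * fixed-size helpers above wrap get/read/put in a single call. The cell
 * name "calibration" is hypothetical.
 *
 *	u32 calib;
 *	int err = nvmem_cell_read_u32(dev, "calibration", &calib);
 *
 *	if (err)
 *		return err;
 */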

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
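
/*
 * Illustrative board-file sketch (not part of the original file): a static
 * lookup table mapping a provider cell to a consumer device on non-DT
 * systems, matched by nvmem_cell_get_from_lookup(). All names are
 * hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-nvmem0",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-eth.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */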

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");