1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * nvmem framework core. 4 * 5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 7 */ 8 9 #include <linux/device.h> 10 #include <linux/export.h> 11 #include <linux/fs.h> 12 #include <linux/idr.h> 13 #include <linux/init.h> 14 #include <linux/kref.h> 15 #include <linux/module.h> 16 #include <linux/nvmem-consumer.h> 17 #include <linux/nvmem-provider.h> 18 #include <linux/gpio/consumer.h> 19 #include <linux/of.h> 20 #include <linux/slab.h> 21 22 struct nvmem_device { 23 struct module *owner; 24 struct device dev; 25 int stride; 26 int word_size; 27 int id; 28 struct kref refcnt; 29 size_t size; 30 bool read_only; 31 bool root_only; 32 int flags; 33 enum nvmem_type type; 34 struct bin_attribute eeprom; 35 struct device *base_dev; 36 struct list_head cells; 37 const struct nvmem_keepout *keepout; 38 unsigned int nkeepout; 39 nvmem_reg_read_t reg_read; 40 nvmem_reg_write_t reg_write; 41 struct gpio_desc *wp_gpio; 42 struct nvmem_layout *layout; 43 void *priv; 44 }; 45 46 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) 47 48 #define FLAG_COMPAT BIT(0) 49 struct nvmem_cell_entry { 50 const char *name; 51 int offset; 52 size_t raw_len; 53 int bytes; 54 int bit_offset; 55 int nbits; 56 nvmem_cell_post_process_t read_post_process; 57 void *priv; 58 struct device_node *np; 59 struct nvmem_device *nvmem; 60 struct list_head node; 61 }; 62 63 struct nvmem_cell { 64 struct nvmem_cell_entry *entry; 65 const char *id; 66 int index; 67 }; 68 69 static DEFINE_MUTEX(nvmem_mutex); 70 static DEFINE_IDA(nvmem_ida); 71 72 static DEFINE_MUTEX(nvmem_cell_mutex); 73 static LIST_HEAD(nvmem_cell_tables); 74 75 static DEFINE_MUTEX(nvmem_lookup_mutex); 76 static LIST_HEAD(nvmem_lookup_list); 77 78 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier); 79 80 static DEFINE_SPINLOCK(nvmem_layout_lock); 81 static LIST_HEAD(nvmem_layouts); 82 83 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, 84 void *val, size_t bytes) 85 { 86 if (nvmem->reg_read) 87 return nvmem->reg_read(nvmem->priv, offset, val, bytes); 88 89 return -EINVAL; 90 } 91 92 static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, 93 void *val, size_t bytes) 94 { 95 int ret; 96 97 if (nvmem->reg_write) { 98 gpiod_set_value_cansleep(nvmem->wp_gpio, 0); 99 ret = nvmem->reg_write(nvmem->priv, offset, val, bytes); 100 gpiod_set_value_cansleep(nvmem->wp_gpio, 1); 101 return ret; 102 } 103 104 return -EINVAL; 105 } 106 107 static int nvmem_access_with_keepouts(struct nvmem_device *nvmem, 108 unsigned int offset, void *val, 109 size_t bytes, int write) 110 { 111 112 unsigned int end = offset + bytes; 113 unsigned int kend, ksize; 114 const struct nvmem_keepout *keepout = nvmem->keepout; 115 const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout; 116 int rc; 117 118 /* 119 * Skip all keepouts before the range being accessed. 120 * Keepouts are sorted. 121 */ 122 while ((keepout < keepoutend) && (keepout->end <= offset)) 123 keepout++; 124 125 while ((offset < end) && (keepout < keepoutend)) { 126 /* Access the valid portion before the keepout. */ 127 if (offset < keepout->start) { 128 kend = min(end, keepout->start); 129 ksize = kend - offset; 130 if (write) 131 rc = __nvmem_reg_write(nvmem, offset, val, ksize); 132 else 133 rc = __nvmem_reg_read(nvmem, offset, val, ksize); 134 135 if (rc) 136 return rc; 137 138 offset += ksize; 139 val += ksize; 140 } 141 142 /* 143 * Now we're aligned to the start of this keepout zone. Go 144 * through it. 145 */ 146 kend = min(end, keepout->end); 147 ksize = kend - offset; 148 if (!write) 149 memset(val, keepout->value, ksize); 150 151 val += ksize; 152 offset += ksize; 153 keepout++; 154 } 155 156 /* 157 * If we ran out of keepouts but there's still stuff to do, send it 158 * down directly 159 */ 160 if (offset < end) { 161 ksize = end - offset; 162 if (write) 163 return __nvmem_reg_write(nvmem, offset, val, ksize); 164 else 165 return __nvmem_reg_read(nvmem, offset, val, ksize); 166 } 167 168 return 0; 169 } 170 171 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, 172 void *val, size_t bytes) 173 { 174 if (!nvmem->nkeepout) 175 return __nvmem_reg_read(nvmem, offset, val, bytes); 176 177 return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false); 178 } 179 180 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, 181 void *val, size_t bytes) 182 { 183 if (!nvmem->nkeepout) 184 return __nvmem_reg_write(nvmem, offset, val, bytes); 185 186 return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true); 187 } 188 189 #ifdef CONFIG_NVMEM_SYSFS 190 static const char * const nvmem_type_str[] = { 191 [NVMEM_TYPE_UNKNOWN] = "Unknown", 192 [NVMEM_TYPE_EEPROM] = "EEPROM", 193 [NVMEM_TYPE_OTP] = "OTP", 194 [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed", 195 [NVMEM_TYPE_FRAM] = "FRAM", 196 }; 197 198 #ifdef CONFIG_DEBUG_LOCK_ALLOC 199 static struct lock_class_key eeprom_lock_key; 200 #endif 201 202 static ssize_t type_show(struct device *dev, 203 struct device_attribute *attr, char *buf) 204 { 205 struct nvmem_device *nvmem = to_nvmem_device(dev); 206 207 return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]); 208 } 209 210 static DEVICE_ATTR_RO(type); 211 212 static struct attribute *nvmem_attrs[] = { 213 &dev_attr_type.attr, 214 NULL, 215 }; 216 217 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, 218 struct bin_attribute *attr, char *buf, 219 loff_t pos, size_t count) 220 { 221 struct device *dev; 222 struct nvmem_device *nvmem; 223 int rc; 224 225 if (attr->private) 226 dev = attr->private; 227 else 228 dev = kobj_to_dev(kobj); 229 nvmem = to_nvmem_device(dev); 230 231 /* Stop the user from reading */ 232 if (pos >= nvmem->size) 233 return 0; 234 235 if (!IS_ALIGNED(pos, nvmem->stride)) 236 return -EINVAL; 237 238 if (count < nvmem->word_size) 239 return -EINVAL; 240 241 if (pos + count > nvmem->size) 242 count = nvmem->size - pos; 243 244 count = round_down(count, nvmem->word_size); 245 246 if (!nvmem->reg_read) 247 return -EPERM; 248 249 rc = nvmem_reg_read(nvmem, pos, buf, count); 250 251 if (rc) 252 return rc; 253 254 return count; 255 } 256 257 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, 258 struct bin_attribute *attr, char *buf, 259 loff_t pos, size_t count) 260 { 261 struct device *dev; 262 struct nvmem_device *nvmem; 263 int rc; 264 265 if (attr->private) 266 dev = attr->private; 267 else 268 dev = kobj_to_dev(kobj); 269 nvmem = to_nvmem_device(dev); 270 271 /* Stop the user from writing */ 272 if (pos >= nvmem->size) 273 return -EFBIG; 274 275 if (!IS_ALIGNED(pos, nvmem->stride)) 276 return -EINVAL; 277 278 if (count < nvmem->word_size) 279 return -EINVAL; 280 281 if (pos + count > nvmem->size) 282 count = nvmem->size - pos; 283 284 count = round_down(count, nvmem->word_size); 285 286 if (!nvmem->reg_write) 287 return -EPERM; 288 289 rc = nvmem_reg_write(nvmem, pos, buf, count); 290 291 if (rc) 292 return rc; 293 294 return count; 295 } 296 297 static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem) 298 { 299 umode_t mode = 0400; 300 301 if (!nvmem->root_only) 302 mode |= 0044; 303 304 if (!nvmem->read_only) 305 mode |= 0200; 306 307 if (!nvmem->reg_write) 308 mode &= ~0200; 309 310 if (!nvmem->reg_read) 311 mode &= ~0444; 312 313 return mode; 314 } 315 316 static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, 317 struct bin_attribute *attr, int i) 318 { 319 struct device *dev = kobj_to_dev(kobj); 320 struct nvmem_device *nvmem = to_nvmem_device(dev); 321 322 attr->size = nvmem->size; 323 324 return nvmem_bin_attr_get_umode(nvmem); 325 } 326 327 /* default read/write permissions */ 328 static struct bin_attribute bin_attr_rw_nvmem = { 329 .attr = { 330 .name = "nvmem", 331 .mode = 0644, 332 }, 333 .read = bin_attr_nvmem_read, 334 .write = bin_attr_nvmem_write, 335 }; 336 337 static struct bin_attribute *nvmem_bin_attributes[] = { 338 &bin_attr_rw_nvmem, 339 NULL, 340 }; 341 342 static const struct attribute_group nvmem_bin_group = { 343 .bin_attrs = nvmem_bin_attributes, 344 .attrs = nvmem_attrs, 345 .is_bin_visible = nvmem_bin_attr_is_visible, 346 }; 347 348 static const struct attribute_group *nvmem_dev_groups[] = { 349 &nvmem_bin_group, 350 NULL, 351 }; 352 353 static struct bin_attribute bin_attr_nvmem_eeprom_compat = { 354 .attr = { 355 .name = "eeprom", 356 }, 357 .read = bin_attr_nvmem_read, 358 .write = bin_attr_nvmem_write, 359 }; 360 361 /* 362 * nvmem_setup_compat() - Create an additional binary entry in 363 * drivers sys directory, to be backwards compatible with the older 364 * drivers/misc/eeprom drivers. 365 */ 366 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, 367 const struct nvmem_config *config) 368 { 369 int rval; 370 371 if (!config->compat) 372 return 0; 373 374 if (!config->base_dev) 375 return -EINVAL; 376 377 nvmem->eeprom = bin_attr_nvmem_eeprom_compat; 378 if (config->type == NVMEM_TYPE_FRAM) 379 nvmem->eeprom.attr.name = "fram"; 380 nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem); 381 nvmem->eeprom.size = nvmem->size; 382 #ifdef CONFIG_DEBUG_LOCK_ALLOC 383 nvmem->eeprom.attr.key = &eeprom_lock_key; 384 #endif 385 nvmem->eeprom.private = &nvmem->dev; 386 nvmem->base_dev = config->base_dev; 387 388 rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); 389 if (rval) { 390 dev_err(&nvmem->dev, 391 "Failed to create eeprom binary file %d\n", rval); 392 return rval; 393 } 394 395 nvmem->flags |= FLAG_COMPAT; 396 397 return 0; 398 } 399 400 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, 401 const struct nvmem_config *config) 402 { 403 if (config->compat) 404 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); 405 } 406 407 #else /* CONFIG_NVMEM_SYSFS */ 408 409 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, 410 const struct nvmem_config *config) 411 { 412 return -ENOSYS; 413 } 414 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, 415 const struct nvmem_config *config) 416 { 417 } 418 419 #endif /* CONFIG_NVMEM_SYSFS */ 420 421 static void nvmem_release(struct device *dev) 422 { 423 struct nvmem_device *nvmem = to_nvmem_device(dev); 424 425 ida_free(&nvmem_ida, nvmem->id); 426 gpiod_put(nvmem->wp_gpio); 427 kfree(nvmem); 428 } 429 430 static const struct device_type nvmem_provider_type = { 431 .release = nvmem_release, 432 }; 433 434 static struct bus_type nvmem_bus_type = { 435 .name = "nvmem", 436 }; 437 438 static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell) 439 { 440 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell); 441 mutex_lock(&nvmem_mutex); 442 list_del(&cell->node); 443 mutex_unlock(&nvmem_mutex); 444 of_node_put(cell->np); 445 kfree_const(cell->name); 446 kfree(cell); 447 } 448 449 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) 450 { 451 struct nvmem_cell_entry *cell, *p; 452 453 list_for_each_entry_safe(cell, p, &nvmem->cells, node) 454 nvmem_cell_entry_drop(cell); 455 } 456 457 static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell) 458 { 459 mutex_lock(&nvmem_mutex); 460 list_add_tail(&cell->node, &cell->nvmem->cells); 461 mutex_unlock(&nvmem_mutex); 462 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell); 463 } 464 465 static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, 466 const struct nvmem_cell_info *info, 467 struct nvmem_cell_entry *cell) 468 { 469 cell->nvmem = nvmem; 470 cell->offset = info->offset; 471 cell->raw_len = info->raw_len ?: info->bytes; 472 cell->bytes = info->bytes; 473 cell->name = info->name; 474 cell->read_post_process = info->read_post_process; 475 cell->priv = info->priv; 476 477 cell->bit_offset = info->bit_offset; 478 cell->nbits = info->nbits; 479 cell->np = info->np; 480 481 if (cell->nbits) 482 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, 483 BITS_PER_BYTE); 484 485 if (!IS_ALIGNED(cell->offset, nvmem->stride)) { 486 dev_err(&nvmem->dev, 487 "cell %s unaligned to nvmem stride %d\n", 488 cell->name ?: "<unknown>", nvmem->stride); 489 return -EINVAL; 490 } 491 492 return 0; 493 } 494 495 static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem, 496 const struct nvmem_cell_info *info, 497 struct nvmem_cell_entry *cell) 498 { 499 int err; 500 501 err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell); 502 if (err) 503 return err; 504 505 cell->name = kstrdup_const(info->name, GFP_KERNEL); 506 if (!cell->name) 507 return -ENOMEM; 508 509 return 0; 510 } 511 512 /** 513 * nvmem_add_one_cell() - Add one cell information to an nvmem device 514 * 515 * @nvmem: nvmem device to add cells to. 516 * @info: nvmem cell info to add to the device 517 * 518 * Return: 0 or negative error code on failure. 519 */ 520 int nvmem_add_one_cell(struct nvmem_device *nvmem, 521 const struct nvmem_cell_info *info) 522 { 523 struct nvmem_cell_entry *cell; 524 int rval; 525 526 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 527 if (!cell) 528 return -ENOMEM; 529 530 rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell); 531 if (rval) { 532 kfree(cell); 533 return rval; 534 } 535 536 nvmem_cell_entry_add(cell); 537 538 return 0; 539 } 540 EXPORT_SYMBOL_GPL(nvmem_add_one_cell); 541 542 /** 543 * nvmem_add_cells() - Add cell information to an nvmem device 544 * 545 * @nvmem: nvmem device to add cells to. 546 * @info: nvmem cell info to add to the device 547 * @ncells: number of cells in info 548 * 549 * Return: 0 or negative error code on failure. 550 */ 551 static int nvmem_add_cells(struct nvmem_device *nvmem, 552 const struct nvmem_cell_info *info, 553 int ncells) 554 { 555 int i, rval; 556 557 for (i = 0; i < ncells; i++) { 558 rval = nvmem_add_one_cell(nvmem, &info[i]); 559 if (rval) 560 return rval; 561 } 562 563 return 0; 564 } 565 566 /** 567 * nvmem_register_notifier() - Register a notifier block for nvmem events. 568 * 569 * @nb: notifier block to be called on nvmem events. 570 * 571 * Return: 0 on success, negative error number on failure. 572 */ 573 int nvmem_register_notifier(struct notifier_block *nb) 574 { 575 return blocking_notifier_chain_register(&nvmem_notifier, nb); 576 } 577 EXPORT_SYMBOL_GPL(nvmem_register_notifier); 578 579 /** 580 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events. 581 * 582 * @nb: notifier block to be unregistered. 583 * 584 * Return: 0 on success, negative error number on failure. 585 */ 586 int nvmem_unregister_notifier(struct notifier_block *nb) 587 { 588 return blocking_notifier_chain_unregister(&nvmem_notifier, nb); 589 } 590 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier); 591 592 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem) 593 { 594 const struct nvmem_cell_info *info; 595 struct nvmem_cell_table *table; 596 struct nvmem_cell_entry *cell; 597 int rval = 0, i; 598 599 mutex_lock(&nvmem_cell_mutex); 600 list_for_each_entry(table, &nvmem_cell_tables, node) { 601 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) { 602 for (i = 0; i < table->ncells; i++) { 603 info = &table->cells[i]; 604 605 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 606 if (!cell) { 607 rval = -ENOMEM; 608 goto out; 609 } 610 611 rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell); 612 if (rval) { 613 kfree(cell); 614 goto out; 615 } 616 617 nvmem_cell_entry_add(cell); 618 } 619 } 620 } 621 622 out: 623 mutex_unlock(&nvmem_cell_mutex); 624 return rval; 625 } 626 627 static struct nvmem_cell_entry * 628 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id) 629 { 630 struct nvmem_cell_entry *iter, *cell = NULL; 631 632 mutex_lock(&nvmem_mutex); 633 list_for_each_entry(iter, &nvmem->cells, node) { 634 if (strcmp(cell_id, iter->name) == 0) { 635 cell = iter; 636 break; 637 } 638 } 639 mutex_unlock(&nvmem_mutex); 640 641 return cell; 642 } 643 644 static int nvmem_validate_keepouts(struct nvmem_device *nvmem) 645 { 646 unsigned int cur = 0; 647 const struct nvmem_keepout *keepout = nvmem->keepout; 648 const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout; 649 650 while (keepout < keepoutend) { 651 /* Ensure keepouts are sorted and don't overlap. */ 652 if (keepout->start < cur) { 653 dev_err(&nvmem->dev, 654 "Keepout regions aren't sorted or overlap.\n"); 655 656 return -ERANGE; 657 } 658 659 if (keepout->end < keepout->start) { 660 dev_err(&nvmem->dev, 661 "Invalid keepout region.\n"); 662 663 return -EINVAL; 664 } 665 666 /* 667 * Validate keepouts (and holes between) don't violate 668 * word_size constraints. 669 */ 670 if ((keepout->end - keepout->start < nvmem->word_size) || 671 ((keepout->start != cur) && 672 (keepout->start - cur < nvmem->word_size))) { 673 674 dev_err(&nvmem->dev, 675 "Keepout regions violate word_size constraints.\n"); 676 677 return -ERANGE; 678 } 679 680 /* Validate keepouts don't violate stride (alignment). */ 681 if (!IS_ALIGNED(keepout->start, nvmem->stride) || 682 !IS_ALIGNED(keepout->end, nvmem->stride)) { 683 684 dev_err(&nvmem->dev, 685 "Keepout regions violate stride.\n"); 686 687 return -EINVAL; 688 } 689 690 cur = keepout->end; 691 keepout++; 692 } 693 694 return 0; 695 } 696 697 static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np) 698 { 699 struct nvmem_layout *layout = nvmem->layout; 700 struct device *dev = &nvmem->dev; 701 struct device_node *child; 702 const __be32 *addr; 703 int len, ret; 704 705 for_each_child_of_node(np, child) { 706 struct nvmem_cell_info info = {0}; 707 708 addr = of_get_property(child, "reg", &len); 709 if (!addr) 710 continue; 711 if (len < 2 * sizeof(u32)) { 712 dev_err(dev, "nvmem: invalid reg on %pOF\n", child); 713 of_node_put(child); 714 return -EINVAL; 715 } 716 717 info.offset = be32_to_cpup(addr++); 718 info.bytes = be32_to_cpup(addr); 719 info.name = kasprintf(GFP_KERNEL, "%pOFn", child); 720 721 addr = of_get_property(child, "bits", &len); 722 if (addr && len == (2 * sizeof(u32))) { 723 info.bit_offset = be32_to_cpup(addr++); 724 info.nbits = be32_to_cpup(addr); 725 } 726 727 info.np = of_node_get(child); 728 729 if (layout && layout->fixup_cell_info) 730 layout->fixup_cell_info(nvmem, layout, &info); 731 732 ret = nvmem_add_one_cell(nvmem, &info); 733 kfree(info.name); 734 if (ret) { 735 of_node_put(child); 736 return ret; 737 } 738 } 739 740 return 0; 741 } 742 743 static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem) 744 { 745 return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node); 746 } 747 748 static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem) 749 { 750 struct device_node *layout_np; 751 int err = 0; 752 753 layout_np = of_nvmem_layout_get_container(nvmem); 754 if (!layout_np) 755 return 0; 756 757 if (of_device_is_compatible(layout_np, "fixed-layout")) 758 err = nvmem_add_cells_from_dt(nvmem, layout_np); 759 760 of_node_put(layout_np); 761 762 return err; 763 } 764 765 int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner) 766 { 767 layout->owner = owner; 768 769 spin_lock(&nvmem_layout_lock); 770 list_add(&layout->node, &nvmem_layouts); 771 spin_unlock(&nvmem_layout_lock); 772 773 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout); 774 775 return 0; 776 } 777 EXPORT_SYMBOL_GPL(__nvmem_layout_register); 778 779 void nvmem_layout_unregister(struct nvmem_layout *layout) 780 { 781 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout); 782 783 spin_lock(&nvmem_layout_lock); 784 list_del(&layout->node); 785 spin_unlock(&nvmem_layout_lock); 786 } 787 EXPORT_SYMBOL_GPL(nvmem_layout_unregister); 788 789 static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem) 790 { 791 struct device_node *layout_np; 792 struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER); 793 794 layout_np = of_nvmem_layout_get_container(nvmem); 795 if (!layout_np) 796 return NULL; 797 798 /* Fixed layouts don't have a matching driver */ 799 if (of_device_is_compatible(layout_np, "fixed-layout")) { 800 of_node_put(layout_np); 801 return NULL; 802 } 803 804 /* 805 * In case the nvmem device was built-in while the layout was built as a 806 * module, we shall manually request the layout driver loading otherwise 807 * we'll never have any match. 808 */ 809 of_request_module(layout_np); 810 811 spin_lock(&nvmem_layout_lock); 812 813 list_for_each_entry(l, &nvmem_layouts, node) { 814 if (of_match_node(l->of_match_table, layout_np)) { 815 if (try_module_get(l->owner)) 816 layout = l; 817 818 break; 819 } 820 } 821 822 spin_unlock(&nvmem_layout_lock); 823 of_node_put(layout_np); 824 825 return layout; 826 } 827 828 static void nvmem_layout_put(struct nvmem_layout *layout) 829 { 830 if (layout) 831 module_put(layout->owner); 832 } 833 834 static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem) 835 { 836 struct nvmem_layout *layout = nvmem->layout; 837 int ret; 838 839 if (layout && layout->add_cells) { 840 ret = layout->add_cells(&nvmem->dev, nvmem, layout); 841 if (ret) 842 return ret; 843 } 844 845 return 0; 846 } 847 848 #if IS_ENABLED(CONFIG_OF) 849 /** 850 * of_nvmem_layout_get_container() - Get OF node to layout container. 851 * 852 * @nvmem: nvmem device. 853 * 854 * Return: a node pointer with refcount incremented or NULL if no 855 * container exists. Use of_node_put() on it when done. 856 */ 857 struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem) 858 { 859 return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout"); 860 } 861 EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container); 862 #endif 863 864 const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem, 865 struct nvmem_layout *layout) 866 { 867 struct device_node __maybe_unused *layout_np; 868 const struct of_device_id *match; 869 870 layout_np = of_nvmem_layout_get_container(nvmem); 871 match = of_match_node(layout->of_match_table, layout_np); 872 873 return match ? match->data : NULL; 874 } 875 EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data); 876 877 /** 878 * nvmem_register() - Register a nvmem device for given nvmem_config. 879 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem 880 * 881 * @config: nvmem device configuration with which nvmem device is created. 882 * 883 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device 884 * on success. 885 */ 886 887 struct nvmem_device *nvmem_register(const struct nvmem_config *config) 888 { 889 struct nvmem_device *nvmem; 890 int rval; 891 892 if (!config->dev) 893 return ERR_PTR(-EINVAL); 894 895 if (!config->reg_read && !config->reg_write) 896 return ERR_PTR(-EINVAL); 897 898 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); 899 if (!nvmem) 900 return ERR_PTR(-ENOMEM); 901 902 rval = ida_alloc(&nvmem_ida, GFP_KERNEL); 903 if (rval < 0) { 904 kfree(nvmem); 905 return ERR_PTR(rval); 906 } 907 908 nvmem->id = rval; 909 910 nvmem->dev.type = &nvmem_provider_type; 911 nvmem->dev.bus = &nvmem_bus_type; 912 nvmem->dev.parent = config->dev; 913 914 device_initialize(&nvmem->dev); 915 916 if (!config->ignore_wp) 917 nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", 918 GPIOD_OUT_HIGH); 919 if (IS_ERR(nvmem->wp_gpio)) { 920 rval = PTR_ERR(nvmem->wp_gpio); 921 nvmem->wp_gpio = NULL; 922 goto err_put_device; 923 } 924 925 kref_init(&nvmem->refcnt); 926 INIT_LIST_HEAD(&nvmem->cells); 927 928 nvmem->owner = config->owner; 929 if (!nvmem->owner && config->dev->driver) 930 nvmem->owner = config->dev->driver->owner; 931 nvmem->stride = config->stride ?: 1; 932 nvmem->word_size = config->word_size ?: 1; 933 nvmem->size = config->size; 934 nvmem->root_only = config->root_only; 935 nvmem->priv = config->priv; 936 nvmem->type = config->type; 937 nvmem->reg_read = config->reg_read; 938 nvmem->reg_write = config->reg_write; 939 nvmem->keepout = config->keepout; 940 nvmem->nkeepout = config->nkeepout; 941 if (config->of_node) 942 nvmem->dev.of_node = config->of_node; 943 else if (!config->no_of_node) 944 nvmem->dev.of_node = config->dev->of_node; 945 946 switch (config->id) { 947 case NVMEM_DEVID_NONE: 948 rval = dev_set_name(&nvmem->dev, "%s", config->name); 949 break; 950 case NVMEM_DEVID_AUTO: 951 rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); 952 break; 953 default: 954 rval = dev_set_name(&nvmem->dev, "%s%d", 955 config->name ? : "nvmem", 956 config->name ? config->id : nvmem->id); 957 break; 958 } 959 960 if (rval) 961 goto err_put_device; 962 963 nvmem->read_only = device_property_present(config->dev, "read-only") || 964 config->read_only || !nvmem->reg_write; 965 966 #ifdef CONFIG_NVMEM_SYSFS 967 nvmem->dev.groups = nvmem_dev_groups; 968 #endif 969 970 if (nvmem->nkeepout) { 971 rval = nvmem_validate_keepouts(nvmem); 972 if (rval) 973 goto err_put_device; 974 } 975 976 if (config->compat) { 977 rval = nvmem_sysfs_setup_compat(nvmem, config); 978 if (rval) 979 goto err_put_device; 980 } 981 982 /* 983 * If the driver supplied a layout by config->layout, the module 984 * pointer will be NULL and nvmem_layout_put() will be a noop. 985 */ 986 nvmem->layout = config->layout ?: nvmem_layout_get(nvmem); 987 if (IS_ERR(nvmem->layout)) { 988 rval = PTR_ERR(nvmem->layout); 989 nvmem->layout = NULL; 990 991 if (rval == -EPROBE_DEFER) 992 goto err_teardown_compat; 993 } 994 995 if (config->cells) { 996 rval = nvmem_add_cells(nvmem, config->cells, config->ncells); 997 if (rval) 998 goto err_remove_cells; 999 } 1000 1001 rval = nvmem_add_cells_from_table(nvmem); 1002 if (rval) 1003 goto err_remove_cells; 1004 1005 if (config->add_legacy_fixed_of_cells) { 1006 rval = nvmem_add_cells_from_legacy_of(nvmem); 1007 if (rval) 1008 goto err_remove_cells; 1009 } 1010 1011 rval = nvmem_add_cells_from_fixed_layout(nvmem); 1012 if (rval) 1013 goto err_remove_cells; 1014 1015 rval = nvmem_add_cells_from_layout(nvmem); 1016 if (rval) 1017 goto err_remove_cells; 1018 1019 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); 1020 1021 rval = device_add(&nvmem->dev); 1022 if (rval) 1023 goto err_remove_cells; 1024 1025 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); 1026 1027 return nvmem; 1028 1029 err_remove_cells: 1030 nvmem_device_remove_all_cells(nvmem); 1031 nvmem_layout_put(nvmem->layout); 1032 err_teardown_compat: 1033 if (config->compat) 1034 nvmem_sysfs_remove_compat(nvmem, config); 1035 err_put_device: 1036 put_device(&nvmem->dev); 1037 1038 return ERR_PTR(rval); 1039 } 1040 EXPORT_SYMBOL_GPL(nvmem_register); 1041 1042 static void nvmem_device_release(struct kref *kref) 1043 { 1044 struct nvmem_device *nvmem; 1045 1046 nvmem = container_of(kref, struct nvmem_device, refcnt); 1047 1048 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem); 1049 1050 if (nvmem->flags & FLAG_COMPAT) 1051 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); 1052 1053 nvmem_device_remove_all_cells(nvmem); 1054 nvmem_layout_put(nvmem->layout); 1055 device_unregister(&nvmem->dev); 1056 } 1057 1058 /** 1059 * nvmem_unregister() - Unregister previously registered nvmem device 1060 * 1061 * @nvmem: Pointer to previously registered nvmem device. 1062 */ 1063 void nvmem_unregister(struct nvmem_device *nvmem) 1064 { 1065 if (nvmem) 1066 kref_put(&nvmem->refcnt, nvmem_device_release); 1067 } 1068 EXPORT_SYMBOL_GPL(nvmem_unregister); 1069 1070 static void devm_nvmem_unregister(void *nvmem) 1071 { 1072 nvmem_unregister(nvmem); 1073 } 1074 1075 /** 1076 * devm_nvmem_register() - Register a managed nvmem device for given 1077 * nvmem_config. 1078 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem 1079 * 1080 * @dev: Device that uses the nvmem device. 1081 * @config: nvmem device configuration with which nvmem device is created. 1082 * 1083 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device 1084 * on success. 1085 */ 1086 struct nvmem_device *devm_nvmem_register(struct device *dev, 1087 const struct nvmem_config *config) 1088 { 1089 struct nvmem_device *nvmem; 1090 int ret; 1091 1092 nvmem = nvmem_register(config); 1093 if (IS_ERR(nvmem)) 1094 return nvmem; 1095 1096 ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem); 1097 if (ret) 1098 return ERR_PTR(ret); 1099 1100 return nvmem; 1101 } 1102 EXPORT_SYMBOL_GPL(devm_nvmem_register); 1103 1104 static struct nvmem_device *__nvmem_device_get(void *data, 1105 int (*match)(struct device *dev, const void *data)) 1106 { 1107 struct nvmem_device *nvmem = NULL; 1108 struct device *dev; 1109 1110 mutex_lock(&nvmem_mutex); 1111 dev = bus_find_device(&nvmem_bus_type, NULL, data, match); 1112 if (dev) 1113 nvmem = to_nvmem_device(dev); 1114 mutex_unlock(&nvmem_mutex); 1115 if (!nvmem) 1116 return ERR_PTR(-EPROBE_DEFER); 1117 1118 if (!try_module_get(nvmem->owner)) { 1119 dev_err(&nvmem->dev, 1120 "could not increase module refcount for cell %s\n", 1121 nvmem_dev_name(nvmem)); 1122 1123 put_device(&nvmem->dev); 1124 return ERR_PTR(-EINVAL); 1125 } 1126 1127 kref_get(&nvmem->refcnt); 1128 1129 return nvmem; 1130 } 1131 1132 static void __nvmem_device_put(struct nvmem_device *nvmem) 1133 { 1134 put_device(&nvmem->dev); 1135 module_put(nvmem->owner); 1136 kref_put(&nvmem->refcnt, nvmem_device_release); 1137 } 1138 1139 #if IS_ENABLED(CONFIG_OF) 1140 /** 1141 * of_nvmem_device_get() - Get nvmem device from a given id 1142 * 1143 * @np: Device tree node that uses the nvmem device. 1144 * @id: nvmem name from nvmem-names property. 1145 * 1146 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 1147 * on success. 1148 */ 1149 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) 1150 { 1151 1152 struct device_node *nvmem_np; 1153 struct nvmem_device *nvmem; 1154 int index = 0; 1155 1156 if (id) 1157 index = of_property_match_string(np, "nvmem-names", id); 1158 1159 nvmem_np = of_parse_phandle(np, "nvmem", index); 1160 if (!nvmem_np) 1161 return ERR_PTR(-ENOENT); 1162 1163 nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); 1164 of_node_put(nvmem_np); 1165 return nvmem; 1166 } 1167 EXPORT_SYMBOL_GPL(of_nvmem_device_get); 1168 #endif 1169 1170 /** 1171 * nvmem_device_get() - Get nvmem device from a given id 1172 * 1173 * @dev: Device that uses the nvmem device. 1174 * @dev_name: name of the requested nvmem device. 1175 * 1176 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 1177 * on success. 1178 */ 1179 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) 1180 { 1181 if (dev->of_node) { /* try dt first */ 1182 struct nvmem_device *nvmem; 1183 1184 nvmem = of_nvmem_device_get(dev->of_node, dev_name); 1185 1186 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) 1187 return nvmem; 1188 1189 } 1190 1191 return __nvmem_device_get((void *)dev_name, device_match_name); 1192 } 1193 EXPORT_SYMBOL_GPL(nvmem_device_get); 1194 1195 /** 1196 * nvmem_device_find() - Find nvmem device with matching function 1197 * 1198 * @data: Data to pass to match function 1199 * @match: Callback function to check device 1200 * 1201 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 1202 * on success. 1203 */ 1204 struct nvmem_device *nvmem_device_find(void *data, 1205 int (*match)(struct device *dev, const void *data)) 1206 { 1207 return __nvmem_device_get(data, match); 1208 } 1209 EXPORT_SYMBOL_GPL(nvmem_device_find); 1210 1211 static int devm_nvmem_device_match(struct device *dev, void *res, void *data) 1212 { 1213 struct nvmem_device **nvmem = res; 1214 1215 if (WARN_ON(!nvmem || !*nvmem)) 1216 return 0; 1217 1218 return *nvmem == data; 1219 } 1220 1221 static void devm_nvmem_device_release(struct device *dev, void *res) 1222 { 1223 nvmem_device_put(*(struct nvmem_device **)res); 1224 } 1225 1226 /** 1227 * devm_nvmem_device_put() - put alredy got nvmem device 1228 * 1229 * @dev: Device that uses the nvmem device. 1230 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), 1231 * that needs to be released. 1232 */ 1233 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) 1234 { 1235 int ret; 1236 1237 ret = devres_release(dev, devm_nvmem_device_release, 1238 devm_nvmem_device_match, nvmem); 1239 1240 WARN_ON(ret); 1241 } 1242 EXPORT_SYMBOL_GPL(devm_nvmem_device_put); 1243 1244 /** 1245 * nvmem_device_put() - put alredy got nvmem device 1246 * 1247 * @nvmem: pointer to nvmem device that needs to be released. 1248 */ 1249 void nvmem_device_put(struct nvmem_device *nvmem) 1250 { 1251 __nvmem_device_put(nvmem); 1252 } 1253 EXPORT_SYMBOL_GPL(nvmem_device_put); 1254 1255 /** 1256 * devm_nvmem_device_get() - Get nvmem device of device form a given id 1257 * 1258 * @dev: Device that requests the nvmem device. 1259 * @id: name id for the requested nvmem device. 1260 * 1261 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 1262 * on success. The nvmem_device will be freed by the automatically once the 1263 * device is freed. 1264 */ 1265 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) 1266 { 1267 struct nvmem_device **ptr, *nvmem; 1268 1269 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); 1270 if (!ptr) 1271 return ERR_PTR(-ENOMEM); 1272 1273 nvmem = nvmem_device_get(dev, id); 1274 if (!IS_ERR(nvmem)) { 1275 *ptr = nvmem; 1276 devres_add(dev, ptr); 1277 } else { 1278 devres_free(ptr); 1279 } 1280 1281 return nvmem; 1282 } 1283 EXPORT_SYMBOL_GPL(devm_nvmem_device_get); 1284 1285 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, 1286 const char *id, int index) 1287 { 1288 struct nvmem_cell *cell; 1289 const char *name = NULL; 1290 1291 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 1292 if (!cell) 1293 return ERR_PTR(-ENOMEM); 1294 1295 if (id) { 1296 name = kstrdup_const(id, GFP_KERNEL); 1297 if (!name) { 1298 kfree(cell); 1299 return ERR_PTR(-ENOMEM); 1300 } 1301 } 1302 1303 cell->id = name; 1304 cell->entry = entry; 1305 cell->index = index; 1306 1307 return cell; 1308 } 1309 1310 static struct nvmem_cell * 1311 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) 1312 { 1313 struct nvmem_cell_entry *cell_entry; 1314 struct nvmem_cell *cell = ERR_PTR(-ENOENT); 1315 struct nvmem_cell_lookup *lookup; 1316 struct nvmem_device *nvmem; 1317 const char *dev_id; 1318 1319 if (!dev) 1320 return ERR_PTR(-EINVAL); 1321 1322 dev_id = dev_name(dev); 1323 1324 mutex_lock(&nvmem_lookup_mutex); 1325 1326 list_for_each_entry(lookup, &nvmem_lookup_list, node) { 1327 if ((strcmp(lookup->dev_id, dev_id) == 0) && 1328 (strcmp(lookup->con_id, con_id) == 0)) { 1329 /* This is the right entry. */ 1330 nvmem = __nvmem_device_get((void *)lookup->nvmem_name, 1331 device_match_name); 1332 if (IS_ERR(nvmem)) { 1333 /* Provider may not be registered yet. */ 1334 cell = ERR_CAST(nvmem); 1335 break; 1336 } 1337 1338 cell_entry = nvmem_find_cell_entry_by_name(nvmem, 1339 lookup->cell_name); 1340 if (!cell_entry) { 1341 __nvmem_device_put(nvmem); 1342 cell = ERR_PTR(-ENOENT); 1343 } else { 1344 cell = nvmem_create_cell(cell_entry, con_id, 0); 1345 if (IS_ERR(cell)) 1346 __nvmem_device_put(nvmem); 1347 } 1348 break; 1349 } 1350 } 1351 1352 mutex_unlock(&nvmem_lookup_mutex); 1353 return cell; 1354 } 1355 1356 #if IS_ENABLED(CONFIG_OF) 1357 static struct nvmem_cell_entry * 1358 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np) 1359 { 1360 struct nvmem_cell_entry *iter, *cell = NULL; 1361 1362 mutex_lock(&nvmem_mutex); 1363 list_for_each_entry(iter, &nvmem->cells, node) { 1364 if (np == iter->np) { 1365 cell = iter; 1366 break; 1367 } 1368 } 1369 mutex_unlock(&nvmem_mutex); 1370 1371 return cell; 1372 } 1373 1374 /** 1375 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id 1376 * 1377 * @np: Device tree node that uses the nvmem cell. 1378 * @id: nvmem cell name from nvmem-cell-names property, or NULL 1379 * for the cell at index 0 (the lone cell with no accompanying 1380 * nvmem-cell-names property). 1381 * 1382 * Return: Will be an ERR_PTR() on error or a valid pointer 1383 * to a struct nvmem_cell. The nvmem_cell will be freed by the 1384 * nvmem_cell_put(). 1385 */ 1386 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) 1387 { 1388 struct device_node *cell_np, *nvmem_np; 1389 struct nvmem_device *nvmem; 1390 struct nvmem_cell_entry *cell_entry; 1391 struct nvmem_cell *cell; 1392 struct of_phandle_args cell_spec; 1393 int index = 0; 1394 int cell_index = 0; 1395 int ret; 1396 1397 /* if cell name exists, find index to the name */ 1398 if (id) 1399 index = of_property_match_string(np, "nvmem-cell-names", id); 1400 1401 ret = of_parse_phandle_with_optional_args(np, "nvmem-cells", 1402 "#nvmem-cell-cells", 1403 index, &cell_spec); 1404 if (ret) 1405 return ERR_PTR(-ENOENT); 1406 1407 if (cell_spec.args_count > 1) 1408 return ERR_PTR(-EINVAL); 1409 1410 cell_np = cell_spec.np; 1411 if (cell_spec.args_count) 1412 cell_index = cell_spec.args[0]; 1413 1414 nvmem_np = of_get_parent(cell_np); 1415 if (!nvmem_np) { 1416 of_node_put(cell_np); 1417 return ERR_PTR(-EINVAL); 1418 } 1419 1420 /* nvmem layouts produce cells within the nvmem-layout container */ 1421 if (of_node_name_eq(nvmem_np, "nvmem-layout")) { 1422 nvmem_np = of_get_next_parent(nvmem_np); 1423 if (!nvmem_np) { 1424 of_node_put(cell_np); 1425 return ERR_PTR(-EINVAL); 1426 } 1427 } 1428 1429 nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); 1430 of_node_put(nvmem_np); 1431 if (IS_ERR(nvmem)) { 1432 of_node_put(cell_np); 1433 return ERR_CAST(nvmem); 1434 } 1435 1436 cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np); 1437 of_node_put(cell_np); 1438 if (!cell_entry) { 1439 __nvmem_device_put(nvmem); 1440 return ERR_PTR(-ENOENT); 1441 } 1442 1443 cell = nvmem_create_cell(cell_entry, id, cell_index); 1444 if (IS_ERR(cell)) 1445 __nvmem_device_put(nvmem); 1446 1447 return cell; 1448 } 1449 EXPORT_SYMBOL_GPL(of_nvmem_cell_get); 1450 #endif 1451 1452 /** 1453 * nvmem_cell_get() - Get nvmem cell of device form a given cell name 1454 * 1455 * @dev: Device that requests the nvmem cell. 1456 * @id: nvmem cell name to get (this corresponds with the name from the 1457 * nvmem-cell-names property for DT systems and with the con_id from 1458 * the lookup entry for non-DT systems). 1459 * 1460 * Return: Will be an ERR_PTR() on error or a valid pointer 1461 * to a struct nvmem_cell. The nvmem_cell will be freed by the 1462 * nvmem_cell_put(). 1463 */ 1464 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id) 1465 { 1466 struct nvmem_cell *cell; 1467 1468 if (dev->of_node) { /* try dt first */ 1469 cell = of_nvmem_cell_get(dev->of_node, id); 1470 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) 1471 return cell; 1472 } 1473 1474 /* NULL cell id only allowed for device tree; invalid otherwise */ 1475 if (!id) 1476 return ERR_PTR(-EINVAL); 1477 1478 return nvmem_cell_get_from_lookup(dev, id); 1479 } 1480 EXPORT_SYMBOL_GPL(nvmem_cell_get); 1481 1482 static void devm_nvmem_cell_release(struct device *dev, void *res) 1483 { 1484 nvmem_cell_put(*(struct nvmem_cell **)res); 1485 } 1486 1487 /** 1488 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id 1489 * 1490 * @dev: Device that requests the nvmem cell. 1491 * @id: nvmem cell name id to get. 1492 * 1493 * Return: Will be an ERR_PTR() on error or a valid pointer 1494 * to a struct nvmem_cell. The nvmem_cell will be freed by the 1495 * automatically once the device is freed. 1496 */ 1497 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) 1498 { 1499 struct nvmem_cell **ptr, *cell; 1500 1501 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); 1502 if (!ptr) 1503 return ERR_PTR(-ENOMEM); 1504 1505 cell = nvmem_cell_get(dev, id); 1506 if (!IS_ERR(cell)) { 1507 *ptr = cell; 1508 devres_add(dev, ptr); 1509 } else { 1510 devres_free(ptr); 1511 } 1512 1513 return cell; 1514 } 1515 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); 1516 1517 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) 1518 { 1519 struct nvmem_cell **c = res; 1520 1521 if (WARN_ON(!c || !*c)) 1522 return 0; 1523 1524 return *c == data; 1525 } 1526 1527 /** 1528 * devm_nvmem_cell_put() - Release previously allocated nvmem cell 1529 * from devm_nvmem_cell_get. 1530 * 1531 * @dev: Device that requests the nvmem cell. 1532 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get(). 1533 */ 1534 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) 1535 { 1536 int ret; 1537 1538 ret = devres_release(dev, devm_nvmem_cell_release, 1539 devm_nvmem_cell_match, cell); 1540 1541 WARN_ON(ret); 1542 } 1543 EXPORT_SYMBOL(devm_nvmem_cell_put); 1544 1545 /** 1546 * nvmem_cell_put() - Release previously allocated nvmem cell. 1547 * 1548 * @cell: Previously allocated nvmem cell by nvmem_cell_get(). 1549 */ 1550 void nvmem_cell_put(struct nvmem_cell *cell) 1551 { 1552 struct nvmem_device *nvmem = cell->entry->nvmem; 1553 1554 if (cell->id) 1555 kfree_const(cell->id); 1556 1557 kfree(cell); 1558 __nvmem_device_put(nvmem); 1559 } 1560 EXPORT_SYMBOL_GPL(nvmem_cell_put); 1561 1562 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf) 1563 { 1564 u8 *p, *b; 1565 int i, extra, bit_offset = cell->bit_offset; 1566 1567 p = b = buf; 1568 if (bit_offset) { 1569 /* First shift */ 1570 *b++ >>= bit_offset; 1571 1572 /* setup rest of the bytes if any */ 1573 for (i = 1; i < cell->bytes; i++) { 1574 /* Get bits from next byte and shift them towards msb */ 1575 *p |= *b << (BITS_PER_BYTE - bit_offset); 1576 1577 p = b; 1578 *b++ >>= bit_offset; 1579 } 1580 } else { 1581 /* point to the msb */ 1582 p += cell->bytes - 1; 1583 } 1584 1585 /* result fits in less bytes */ 1586 extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); 1587 while (--extra >= 0) 1588 *p-- = 0; 1589 1590 /* clear msb bits if any leftover in the last byte */ 1591 if (cell->nbits % BITS_PER_BYTE) 1592 *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); 1593 } 1594 1595 static int __nvmem_cell_read(struct nvmem_device *nvmem, 1596 struct nvmem_cell_entry *cell, 1597 void *buf, size_t *len, const char *id, int index) 1598 { 1599 int rc; 1600 1601 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len); 1602 1603 if (rc) 1604 return rc; 1605 1606 /* shift bits in-place */ 1607 if (cell->bit_offset || cell->nbits) 1608 nvmem_shift_read_buffer_in_place(cell, buf); 1609 1610 if (cell->read_post_process) { 1611 rc = cell->read_post_process(cell->priv, id, index, 1612 cell->offset, buf, cell->raw_len); 1613 if (rc) 1614 return rc; 1615 } 1616 1617 if (len) 1618 *len = cell->bytes; 1619 1620 return 0; 1621 } 1622 1623 /** 1624 * nvmem_cell_read() - Read a given nvmem cell 1625 * 1626 * @cell: nvmem cell to be read. 1627 * @len: pointer to length of cell which will be populated on successful read; 1628 * can be NULL. 1629 * 1630 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The 1631 * buffer should be freed by the consumer with a kfree(). 1632 */ 1633 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 1634 { 1635 struct nvmem_cell_entry *entry = cell->entry; 1636 struct nvmem_device *nvmem = entry->nvmem; 1637 u8 *buf; 1638 int rc; 1639 1640 if (!nvmem) 1641 return ERR_PTR(-EINVAL); 1642 1643 buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL); 1644 if (!buf) 1645 return ERR_PTR(-ENOMEM); 1646 1647 rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index); 1648 if (rc) { 1649 kfree(buf); 1650 return ERR_PTR(rc); 1651 } 1652 1653 return buf; 1654 } 1655 EXPORT_SYMBOL_GPL(nvmem_cell_read); 1656 1657 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell, 1658 u8 *_buf, int len) 1659 { 1660 struct nvmem_device *nvmem = cell->nvmem; 1661 int i, rc, nbits, bit_offset = cell->bit_offset; 1662 u8 v, *p, *buf, *b, pbyte, pbits; 1663 1664 nbits = cell->nbits; 1665 buf = kzalloc(cell->bytes, GFP_KERNEL); 1666 if (!buf) 1667 return ERR_PTR(-ENOMEM); 1668 1669 memcpy(buf, _buf, len); 1670 p = b = buf; 1671 1672 if (bit_offset) { 1673 pbyte = *b; 1674 *b <<= bit_offset; 1675 1676 /* setup the first byte with lsb bits from nvmem */ 1677 rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); 1678 if (rc) 1679 goto err; 1680 *b++ |= GENMASK(bit_offset - 1, 0) & v; 1681 1682 /* setup rest of the byte if any */ 1683 for (i = 1; i < cell->bytes; i++) { 1684 /* Get last byte bits and shift them towards lsb */ 1685 pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); 1686 pbyte = *b; 1687 p = b; 1688 *b <<= bit_offset; 1689 *b++ |= pbits; 1690 } 1691 } 1692 1693 /* if it's not end on byte boundary */ 1694 if ((nbits + bit_offset) % BITS_PER_BYTE) { 1695 /* setup the last byte with msb bits from nvmem */ 1696 rc = nvmem_reg_read(nvmem, 1697 cell->offset + cell->bytes - 1, &v, 1); 1698 if (rc) 1699 goto err; 1700 *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; 1701 1702 } 1703 1704 return buf; 1705 err: 1706 kfree(buf); 1707 return ERR_PTR(rc); 1708 } 1709 1710 static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len) 1711 { 1712 struct nvmem_device *nvmem = cell->nvmem; 1713 int rc; 1714 1715 if (!nvmem || nvmem->read_only || 1716 (cell->bit_offset == 0 && len != cell->bytes)) 1717 return -EINVAL; 1718 1719 /* 1720 * Any cells which have a read_post_process hook are read-only because 1721 * we cannot reverse the operation and it might affect other cells, 1722 * too. 1723 */ 1724 if (cell->read_post_process) 1725 return -EINVAL; 1726 1727 if (cell->bit_offset || cell->nbits) { 1728 buf = nvmem_cell_prepare_write_buffer(cell, buf, len); 1729 if (IS_ERR(buf)) 1730 return PTR_ERR(buf); 1731 } 1732 1733 rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes); 1734 1735 /* free the tmp buffer */ 1736 if (cell->bit_offset || cell->nbits) 1737 kfree(buf); 1738 1739 if (rc) 1740 return rc; 1741 1742 return len; 1743 } 1744 1745 /** 1746 * nvmem_cell_write() - Write to a given nvmem cell 1747 * 1748 * @cell: nvmem cell to be written. 1749 * @buf: Buffer to be written. 1750 * @len: length of buffer to be written to nvmem cell. 1751 * 1752 * Return: length of bytes written or negative on failure. 1753 */ 1754 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) 1755 { 1756 return __nvmem_cell_entry_write(cell->entry, buf, len); 1757 } 1758 1759 EXPORT_SYMBOL_GPL(nvmem_cell_write); 1760 1761 static int nvmem_cell_read_common(struct device *dev, const char *cell_id, 1762 void *val, size_t count) 1763 { 1764 struct nvmem_cell *cell; 1765 void *buf; 1766 size_t len; 1767 1768 cell = nvmem_cell_get(dev, cell_id); 1769 if (IS_ERR(cell)) 1770 return PTR_ERR(cell); 1771 1772 buf = nvmem_cell_read(cell, &len); 1773 if (IS_ERR(buf)) { 1774 nvmem_cell_put(cell); 1775 return PTR_ERR(buf); 1776 } 1777 if (len != count) { 1778 kfree(buf); 1779 nvmem_cell_put(cell); 1780 return -EINVAL; 1781 } 1782 memcpy(val, buf, count); 1783 kfree(buf); 1784 nvmem_cell_put(cell); 1785 1786 return 0; 1787 } 1788 1789 /** 1790 * nvmem_cell_read_u8() - Read a cell value as a u8 1791 * 1792 * @dev: Device that requests the nvmem cell. 1793 * @cell_id: Name of nvmem cell to read. 1794 * @val: pointer to output value. 1795 * 1796 * Return: 0 on success or negative errno. 1797 */ 1798 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val) 1799 { 1800 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1801 } 1802 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8); 1803 1804 /** 1805 * nvmem_cell_read_u16() - Read a cell value as a u16 1806 * 1807 * @dev: Device that requests the nvmem cell. 1808 * @cell_id: Name of nvmem cell to read. 1809 * @val: pointer to output value. 1810 * 1811 * Return: 0 on success or negative errno. 1812 */ 1813 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val) 1814 { 1815 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1816 } 1817 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16); 1818 1819 /** 1820 * nvmem_cell_read_u32() - Read a cell value as a u32 1821 * 1822 * @dev: Device that requests the nvmem cell. 1823 * @cell_id: Name of nvmem cell to read. 1824 * @val: pointer to output value. 1825 * 1826 * Return: 0 on success or negative errno. 1827 */ 1828 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) 1829 { 1830 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1831 } 1832 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); 1833 1834 /** 1835 * nvmem_cell_read_u64() - Read a cell value as a u64 1836 * 1837 * @dev: Device that requests the nvmem cell. 1838 * @cell_id: Name of nvmem cell to read. 1839 * @val: pointer to output value. 1840 * 1841 * Return: 0 on success or negative errno. 1842 */ 1843 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val) 1844 { 1845 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1846 } 1847 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64); 1848 1849 static const void *nvmem_cell_read_variable_common(struct device *dev, 1850 const char *cell_id, 1851 size_t max_len, size_t *len) 1852 { 1853 struct nvmem_cell *cell; 1854 int nbits; 1855 void *buf; 1856 1857 cell = nvmem_cell_get(dev, cell_id); 1858 if (IS_ERR(cell)) 1859 return cell; 1860 1861 nbits = cell->entry->nbits; 1862 buf = nvmem_cell_read(cell, len); 1863 nvmem_cell_put(cell); 1864 if (IS_ERR(buf)) 1865 return buf; 1866 1867 /* 1868 * If nbits is set then nvmem_cell_read() can significantly exaggerate 1869 * the length of the real data. Throw away the extra junk. 1870 */ 1871 if (nbits) 1872 *len = DIV_ROUND_UP(nbits, 8); 1873 1874 if (*len > max_len) { 1875 kfree(buf); 1876 return ERR_PTR(-ERANGE); 1877 } 1878 1879 return buf; 1880 } 1881 1882 /** 1883 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number. 1884 * 1885 * @dev: Device that requests the nvmem cell. 1886 * @cell_id: Name of nvmem cell to read. 1887 * @val: pointer to output value. 1888 * 1889 * Return: 0 on success or negative errno. 1890 */ 1891 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id, 1892 u32 *val) 1893 { 1894 size_t len; 1895 const u8 *buf; 1896 int i; 1897 1898 buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len); 1899 if (IS_ERR(buf)) 1900 return PTR_ERR(buf); 1901 1902 /* Copy w/ implicit endian conversion */ 1903 *val = 0; 1904 for (i = 0; i < len; i++) 1905 *val |= buf[i] << (8 * i); 1906 1907 kfree(buf); 1908 1909 return 0; 1910 } 1911 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32); 1912 1913 /** 1914 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number. 1915 * 1916 * @dev: Device that requests the nvmem cell. 1917 * @cell_id: Name of nvmem cell to read. 1918 * @val: pointer to output value. 1919 * 1920 * Return: 0 on success or negative errno. 1921 */ 1922 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id, 1923 u64 *val) 1924 { 1925 size_t len; 1926 const u8 *buf; 1927 int i; 1928 1929 buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len); 1930 if (IS_ERR(buf)) 1931 return PTR_ERR(buf); 1932 1933 /* Copy w/ implicit endian conversion */ 1934 *val = 0; 1935 for (i = 0; i < len; i++) 1936 *val |= (uint64_t)buf[i] << (8 * i); 1937 1938 kfree(buf); 1939 1940 return 0; 1941 } 1942 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64); 1943 1944 /** 1945 * nvmem_device_cell_read() - Read a given nvmem device and cell 1946 * 1947 * @nvmem: nvmem device to read from. 1948 * @info: nvmem cell info to be read. 1949 * @buf: buffer pointer which will be populated on successful read. 1950 * 1951 * Return: length of successful bytes read on success and negative 1952 * error code on error. 1953 */ 1954 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, 1955 struct nvmem_cell_info *info, void *buf) 1956 { 1957 struct nvmem_cell_entry cell; 1958 int rc; 1959 ssize_t len; 1960 1961 if (!nvmem) 1962 return -EINVAL; 1963 1964 rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); 1965 if (rc) 1966 return rc; 1967 1968 rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0); 1969 if (rc) 1970 return rc; 1971 1972 return len; 1973 } 1974 EXPORT_SYMBOL_GPL(nvmem_device_cell_read); 1975 1976 /** 1977 * nvmem_device_cell_write() - Write cell to a given nvmem device 1978 * 1979 * @nvmem: nvmem device to be written to. 1980 * @info: nvmem cell info to be written. 1981 * @buf: buffer to be written to cell. 1982 * 1983 * Return: length of bytes written or negative error code on failure. 1984 */ 1985 int nvmem_device_cell_write(struct nvmem_device *nvmem, 1986 struct nvmem_cell_info *info, void *buf) 1987 { 1988 struct nvmem_cell_entry cell; 1989 int rc; 1990 1991 if (!nvmem) 1992 return -EINVAL; 1993 1994 rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); 1995 if (rc) 1996 return rc; 1997 1998 return __nvmem_cell_entry_write(&cell, buf, cell.bytes); 1999 } 2000 EXPORT_SYMBOL_GPL(nvmem_device_cell_write); 2001 2002 /** 2003 * nvmem_device_read() - Read from a given nvmem device 2004 * 2005 * @nvmem: nvmem device to read from. 2006 * @offset: offset in nvmem device. 2007 * @bytes: number of bytes to read. 2008 * @buf: buffer pointer which will be populated on successful read. 2009 * 2010 * Return: length of successful bytes read on success and negative 2011 * error code on error. 2012 */ 2013 int nvmem_device_read(struct nvmem_device *nvmem, 2014 unsigned int offset, 2015 size_t bytes, void *buf) 2016 { 2017 int rc; 2018 2019 if (!nvmem) 2020 return -EINVAL; 2021 2022 rc = nvmem_reg_read(nvmem, offset, buf, bytes); 2023 2024 if (rc) 2025 return rc; 2026 2027 return bytes; 2028 } 2029 EXPORT_SYMBOL_GPL(nvmem_device_read); 2030 2031 /** 2032 * nvmem_device_write() - Write cell to a given nvmem device 2033 * 2034 * @nvmem: nvmem device to be written to. 2035 * @offset: offset in nvmem device. 2036 * @bytes: number of bytes to write. 2037 * @buf: buffer to be written. 2038 * 2039 * Return: length of bytes written or negative error code on failure. 2040 */ 2041 int nvmem_device_write(struct nvmem_device *nvmem, 2042 unsigned int offset, 2043 size_t bytes, void *buf) 2044 { 2045 int rc; 2046 2047 if (!nvmem) 2048 return -EINVAL; 2049 2050 rc = nvmem_reg_write(nvmem, offset, buf, bytes); 2051 2052 if (rc) 2053 return rc; 2054 2055 2056 return bytes; 2057 } 2058 EXPORT_SYMBOL_GPL(nvmem_device_write); 2059 2060 /** 2061 * nvmem_add_cell_table() - register a table of cell info entries 2062 * 2063 * @table: table of cell info entries 2064 */ 2065 void nvmem_add_cell_table(struct nvmem_cell_table *table) 2066 { 2067 mutex_lock(&nvmem_cell_mutex); 2068 list_add_tail(&table->node, &nvmem_cell_tables); 2069 mutex_unlock(&nvmem_cell_mutex); 2070 } 2071 EXPORT_SYMBOL_GPL(nvmem_add_cell_table); 2072 2073 /** 2074 * nvmem_del_cell_table() - remove a previously registered cell info table 2075 * 2076 * @table: table of cell info entries 2077 */ 2078 void nvmem_del_cell_table(struct nvmem_cell_table *table) 2079 { 2080 mutex_lock(&nvmem_cell_mutex); 2081 list_del(&table->node); 2082 mutex_unlock(&nvmem_cell_mutex); 2083 } 2084 EXPORT_SYMBOL_GPL(nvmem_del_cell_table); 2085 2086 /** 2087 * nvmem_add_cell_lookups() - register a list of cell lookup entries 2088 * 2089 * @entries: array of cell lookup entries 2090 * @nentries: number of cell lookup entries in the array 2091 */ 2092 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) 2093 { 2094 int i; 2095 2096 mutex_lock(&nvmem_lookup_mutex); 2097 for (i = 0; i < nentries; i++) 2098 list_add_tail(&entries[i].node, &nvmem_lookup_list); 2099 mutex_unlock(&nvmem_lookup_mutex); 2100 } 2101 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups); 2102 2103 /** 2104 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup 2105 * entries 2106 * 2107 * @entries: array of cell lookup entries 2108 * @nentries: number of cell lookup entries in the array 2109 */ 2110 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) 2111 { 2112 int i; 2113 2114 mutex_lock(&nvmem_lookup_mutex); 2115 for (i = 0; i < nentries; i++) 2116 list_del(&entries[i].node); 2117 mutex_unlock(&nvmem_lookup_mutex); 2118 } 2119 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups); 2120 2121 /** 2122 * nvmem_dev_name() - Get the name of a given nvmem device. 2123 * 2124 * @nvmem: nvmem device. 2125 * 2126 * Return: name of the nvmem device. 2127 */ 2128 const char *nvmem_dev_name(struct nvmem_device *nvmem) 2129 { 2130 return dev_name(&nvmem->dev); 2131 } 2132 EXPORT_SYMBOL_GPL(nvmem_dev_name); 2133 2134 /** 2135 * nvmem_dev_size() - Get the size of a given nvmem device. 2136 * 2137 * @nvmem: nvmem device. 2138 * 2139 * Return: size of the nvmem device. 2140 */ 2141 size_t nvmem_dev_size(struct nvmem_device *nvmem) 2142 { 2143 return nvmem->size; 2144 } 2145 EXPORT_SYMBOL_GPL(nvmem_dev_size); 2146 2147 static int __init nvmem_init(void) 2148 { 2149 return bus_register(&nvmem_bus_type); 2150 } 2151 2152 static void __exit nvmem_exit(void) 2153 { 2154 bus_unregister(&nvmem_bus_type); 2155 } 2156 2157 subsys_initcall(nvmem_init); 2158 module_exit(nvmem_exit); 2159 2160 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); 2161 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); 2162 MODULE_DESCRIPTION("nvmem Driver Core"); 2163