// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	nvmem_cell_post_process_t cell_post_process;
	struct gpio_desc	*wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

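/*
 * Example (illustrative sketch, not from a real driver; the foo_* names
 * are hypothetical): a provider that must hide a calibration area at
 * bytes 0x20-0x3f can pass a sorted keepout table via its nvmem_config.
 * Reads of that range then return the filler value and writes to it are
 * silently skipped by the accessors above:
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x20, .end = 0x40, .value = 0xff },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */
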
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

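/*
 * Worked examples of the rules above: a device with both reg_read and
 * reg_write and neither root_only nor read_only ends up 0644; setting
 * root_only narrows that to 0600; a provider without reg_write (or marked
 * read_only) yields 0444, or 0400 if it is also root_only.
 */
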
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell_entry **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_entry_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_entry_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

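/*
 * Example (board-code sketch; every foo_* name is made up): cells can be
 * described away from the provider and matched by device name through the
 * nvmem_cell_tables list walked above, using nvmem_add_cell_table() from
 * platform code:
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x40, .bytes = 6, },
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */
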
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell_entry *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_entry_add(cell);
	}

	return 0;
}

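/*
 * Worked example for the "bits" handling above: a DT cell carrying
 * bits = <6 4> (bit_offset = 6, nbits = 4) occupies
 * DIV_ROUND_UP(6 + 4, 8) = 2 bytes, even though the value itself would
 * fit into a single byte.
 */
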
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/*
	 * Record the id before the write-protect GPIO lookup, so the error
	 * path below frees the id actually allocated rather than the zeroed
	 * field.
	 */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->cell_post_process = config->cell_post_process;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval) {
		ida_free(&nvmem_ida, nvmem->id);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_device_del;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

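/*
 * Example (minimal provider sketch; the foo_* names, including the
 * foo_read() helper, are hypothetical): registering a 256-byte,
 * byte-addressable, read-only device from a driver's probe routine:
 *
 *	static int foo_reg_read(void *priv, unsigned int offset, void *val,
 *				size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		return foo_read(foo, offset, val, bytes);
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = dev,
 *		.name = "foo-nvmem",
 *		.id = NVMEM_DEVID_AUTO,
 *		.owner = THIS_MODULE,
 *		.type = NVMEM_TYPE_EEPROM,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 1,
 *		.reg_read = foo_reg_read,
 *		.priv = foo,
 *	};
 *
 *	nvmem = devm_nvmem_register(dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */
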
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for device %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

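/*
 * Example (consumer sketch; the "foo" nvmem name is hypothetical): getting
 * a handle on a whole nvmem device, then reading 16 bytes from offset 0
 * with the device-level API defined further down:
 *
 *	struct nvmem_device *nvmem;
 *	u8 buf[16];
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, "foo");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
 *	if (ret < 0)
 *		return ret;
 */
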
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

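/*
 * Example (board-file sketch; all names are made up): on a non-DT platform
 * the lookup list above is what connects a consumer to its cell. An entry
 * registered with nvmem_add_cell_lookups() makes
 * nvmem_cell_get(dev, "mac-address") work for the "foo-eth.0" device:
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cell_name = "mac-address",
 *		.dev_id = "foo-eth.0",
 *		.con_id = "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */
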
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

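/*
 * Example (consumer sketch; "mac-address" is only a sample cell name): the
 * usual consumer sequence is get, read, put, with the returned buffer
 * owned by the caller:
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	... use the len bytes at mac, then kfree(mac) ...
 */
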
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (nvmem->cell_post_process) {
		rc = nvmem->cell_post_process(nvmem->priv, id,
					      cell->offset, buf, cell->bytes);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

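/*
 * Worked example for the in-place shift above (assumed raw contents): for
 * a cell with bit_offset = 2 and nbits = 7, a raw read of { 0xad, 0x02 }
 * becomes { 0x2b, 0x00 }: byte 0 is shifted right by two, the low two
 * bits of byte 1 are folded into the top of byte 0, the now-surplus byte
 * is zeroed, and the bits above nbits are masked off.
 */
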
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the cell does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

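/*
 * Example (sketch, continuing the consumer examples above): updating a
 * 6-byte cell; on success the number of bytes written is returned:
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (ret < 0)
 *		return ret;
 */
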
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

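/*
 * Example (sketch; the "calibration" cell name is hypothetical): the sized
 * helpers above wrap the whole get/read/put sequence, so a one-off 32-bit
 * read reduces to:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "calibration", &val);
 *	if (ret)
 *		return ret;
 */
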
static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

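/*
 * Worked example for the little-endian helpers above: a two-byte cell
 * holding { 0x34, 0x12 } yields *val == 0x1234, and a cell wider than
 * sizeof(*val) fails with -ERANGE rather than truncating.
 */
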
1792 */ 1793 int nvmem_device_cell_write(struct nvmem_device *nvmem, 1794 struct nvmem_cell_info *info, void *buf) 1795 { 1796 struct nvmem_cell_entry cell; 1797 int rc; 1798 1799 if (!nvmem) 1800 return -EINVAL; 1801 1802 rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); 1803 if (rc) 1804 return rc; 1805 1806 return __nvmem_cell_entry_write(&cell, buf, cell.bytes); 1807 } 1808 EXPORT_SYMBOL_GPL(nvmem_device_cell_write); 1809 1810 /** 1811 * nvmem_device_read() - Read from a given nvmem device 1812 * 1813 * @nvmem: nvmem device to read from. 1814 * @offset: offset in nvmem device. 1815 * @bytes: number of bytes to read. 1816 * @buf: buffer pointer which will be populated on successful read. 1817 * 1818 * Return: length of successful bytes read on success and negative 1819 * error code on error. 1820 */ 1821 int nvmem_device_read(struct nvmem_device *nvmem, 1822 unsigned int offset, 1823 size_t bytes, void *buf) 1824 { 1825 int rc; 1826 1827 if (!nvmem) 1828 return -EINVAL; 1829 1830 rc = nvmem_reg_read(nvmem, offset, buf, bytes); 1831 1832 if (rc) 1833 return rc; 1834 1835 return bytes; 1836 } 1837 EXPORT_SYMBOL_GPL(nvmem_device_read); 1838 1839 /** 1840 * nvmem_device_write() - Write cell to a given nvmem device 1841 * 1842 * @nvmem: nvmem device to be written to. 1843 * @offset: offset in nvmem device. 1844 * @bytes: number of bytes to write. 1845 * @buf: buffer to be written. 1846 * 1847 * Return: length of bytes written or negative error code on failure. 1848 */ 1849 int nvmem_device_write(struct nvmem_device *nvmem, 1850 unsigned int offset, 1851 size_t bytes, void *buf) 1852 { 1853 int rc; 1854 1855 if (!nvmem) 1856 return -EINVAL; 1857 1858 rc = nvmem_reg_write(nvmem, offset, buf, bytes); 1859 1860 if (rc) 1861 return rc; 1862 1863 1864 return bytes; 1865 } 1866 EXPORT_SYMBOL_GPL(nvmem_device_write); 1867 1868 /** 1869 * nvmem_add_cell_table() - register a table of cell info entries 1870 * 1871 * @table: table of cell info entries 1872 */ 1873 void nvmem_add_cell_table(struct nvmem_cell_table *table) 1874 { 1875 mutex_lock(&nvmem_cell_mutex); 1876 list_add_tail(&table->node, &nvmem_cell_tables); 1877 mutex_unlock(&nvmem_cell_mutex); 1878 } 1879 EXPORT_SYMBOL_GPL(nvmem_add_cell_table); 1880 1881 /** 1882 * nvmem_del_cell_table() - remove a previously registered cell info table 1883 * 1884 * @table: table of cell info entries 1885 */ 1886 void nvmem_del_cell_table(struct nvmem_cell_table *table) 1887 { 1888 mutex_lock(&nvmem_cell_mutex); 1889 list_del(&table->node); 1890 mutex_unlock(&nvmem_cell_mutex); 1891 } 1892 EXPORT_SYMBOL_GPL(nvmem_del_cell_table); 1893 1894 /** 1895 * nvmem_add_cell_lookups() - register a list of cell lookup entries 1896 * 1897 * @entries: array of cell lookup entries 1898 * @nentries: number of cell lookup entries in the array 1899 */ 1900 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) 1901 { 1902 int i; 1903 1904 mutex_lock(&nvmem_lookup_mutex); 1905 for (i = 0; i < nentries; i++) 1906 list_add_tail(&entries[i].node, &nvmem_lookup_list); 1907 mutex_unlock(&nvmem_lookup_mutex); 1908 } 1909 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups); 1910 1911 /** 1912 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup 1913 * entries 1914 * 1915 * @entries: array of cell lookup entries 1916 * @nentries: number of cell lookup entries in the array 1917 */ 1918 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) 1919 { 1920 int i; 1921 1922 mutex_lock(&nvmem_lookup_mutex); 1923 
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");