// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	struct gpio_desc	*wp_gpio;
	void			*priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
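/*
 * Illustrative sketch (not part of this file; foo_* names are hypothetical):
 * a provider can describe keepout regions so the core routes accesses around
 * them. Bytes inside a keepout are never forwarded to reg_read()/reg_write();
 * a read of kept-out bytes is filled with .value instead.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *		{ .start = 0x40, .end = 0x44, .value = 0x00 },
 *	};
 *
 *	static struct nvmem_config foo_config = {
 *		...
 *		.keepout	= foo_keepouts,
 *		.nkeepout	= ARRAY_SIZE(foo_keepouts),
 *	};
 *
 * Regions must be sorted, non-overlapping, and compatible with both
 * word_size and stride (see nvmem_validate_keepouts() below).
 */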
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * device's sysfs directory, to stay backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					const struct nvmem_cell_info *info,
					struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				const struct nvmem_cell_info *info,
				struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 on success or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
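/*
 * Illustrative sketch (hypothetical foo_* names): a subsystem that wants to
 * react to providers or cells coming and going can hook this blocking
 * notifier chain. The notifier data is a struct nvmem_device * for
 * NVMEM_ADD/NVMEM_REMOVE and a struct nvmem_cell * for
 * NVMEM_CELL_ADD/NVMEM_CELL_REMOVE.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */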
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}
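/*
 * Illustrative sketch (hypothetical foo_* names): machine code can describe
 * cells for a provider by device name in a cell table; the entries are picked
 * up here when the matching device registers. See nvmem_add_cell_table() at
 * the bottom of this file.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x0,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */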
static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			of_node_put(cell->np);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
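/*
 * Illustrative device tree sketch (hypothetical node names): each child node
 * of the provider that carries a "reg" property (<offset bytes>) becomes a
 * cell; an optional "bits" property (<bit_offset nbits>) narrows the cell to
 * a bit field, as parsed above.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		mac_address: mac-address@90 {
 *			reg = <0x90 0x6>;
 *		};
 *
 *		board_rev: board-rev@a0 {
 *			reg = <0xa0 0x2>;
 *			bits = <4 10>;
 *		};
 *	};
 */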
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Set the id before any error path that calls ida_free() on it. */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	/*
	 * Initialise the device here so that the error paths below can use
	 * put_device() to release the id, wp_gpio and the allocation via
	 * nvmem_release(); device_add() completes the registration.
	 */
	device_initialize(&nvmem->dev);

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
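/*
 * Illustrative provider sketch (hypothetical foo_* names): a driver supplies
 * reg_read()/reg_write() callbacks plus geometry in a struct nvmem_config and
 * registers it; devm_nvmem_register() below is the managed variant.
 *
 *	static int foo_reg_read(void *priv, unsigned int off, void *val,
 *				size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + off, bytes);
 *		return 0;
 *	}
 *
 *	static struct nvmem_config foo_config = {
 *		.name		= "foo-eeprom",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.read_only	= true,
 *		.size		= 256,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.reg_read	= foo_reg_read,
 *	};
 *
 *	foo_config.dev = &pdev->dev;
 *	foo_config.priv = foo;
 *	nvmem = devm_nvmem_register(&pdev->dev, &foo_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */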
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
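/*
 * Illustrative consumer sketch: on DT systems the lookup goes through the
 * "nvmem"/"nvmem-names" properties of the consumer node; otherwise the
 * requested name is matched against registered device names. The cell name
 * here is hypothetical; devm_nvmem_device_get() is defined below.
 *
 *	nvmem = devm_nvmem_device_get(&pdev->dev, "calibration");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);	// may be -EPROBE_DEFER
 */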
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
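/*
 * Illustrative sketch (hypothetical foo_* names): on non-DT systems, board
 * code can connect a consumer's con_id to a provider cell with a lookup
 * entry, which nvmem_cell_get_from_lookup() above consults.
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "foo-eth.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */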
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
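/*
 * Worked example of the in-place shift above (values chosen for
 * illustration): a cell with bit_offset = 2 and nbits = 10 occupies
 * DIV_ROUND_UP(2 + 10, 8) = 2 bytes. Each byte is shifted right by 2 and the
 * low 2 bits of the following byte are folded into the vacated MSBs of the
 * previous byte; the final byte is then masked with
 * GENMASK(10 % 8 - 1, 0) = GENMASK(1, 0) so only the 10 field bits survive.
 */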
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
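/*
 * Illustrative consumer sketch (hypothetical cell name): read a whole cell
 * as a buffer. The returned buffer is allocated here and owned by the caller.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	// ... use mac[0..len-1] ...
 *	kfree(mac);
 */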
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
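/*
 * Illustrative sketch (hypothetical cell name): the typed helpers above wrap
 * get + read + put for the common "one small value" case.
 *
 *	u32 calib;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(&pdev->dev, "calibration", &calib);
 *	if (ret)
 *		return ret;	// -EINVAL if the cell is not 4 bytes
 */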
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
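/*
 * Illustrative sketch (hypothetical offset): raw, offset-based access for
 * consumers that hold a whole device rather than a cell. Writes go through
 * nvmem_device_write() below and respect read_only and keepout regions.
 *
 *	u8 serial[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0x100, sizeof(serial), serial);
 *	if (ret != sizeof(serial))
 *		return ret < 0 ? ret : -EIO;
 */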
/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");