// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	const struct nvmem_keepout *keepout;
	unsigned int nkeepout;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
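
/*
 * Illustrative sketch (not part of the core; the foo_* names are
 * hypothetical): a provider fences off ranges with keepouts via its
 * nvmem_config.  Regions must be sorted, non-overlapping and aligned to
 * the device stride (see nvmem_validate_keepouts() below); reads inside
 * a keepout return ->value and writes to it are silently skipped.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *		{ .start = 0x40, .end = 0x44, .value = 0x00 },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */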

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
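
/*
 * Illustrative sketch (hypothetical provider): exposing the legacy "eeprom"
 * attribute next to the modern "nvmem" one only takes two extra config
 * fields.  Assumes an i2c_client *client in scope.
 *
 *	config.compat = true;
 *	config.base_dev = &client->dev;	// where the "eeprom" file appears
 */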

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
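
/*
 * Illustrative sketch (hypothetical names): the nvmem_cell_info entries a
 * provider hands in via nvmem_config::cells.  A cell can cover whole bytes,
 * or a bitfield within them via bit_offset/nbits.
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x00, .bytes = 6 },
 *		{ .name = "board-rev", .offset = 0x10, .bytes = 1,
 *		  .bit_offset = 4, .nbits = 4 },
 *	};
 *
 *	config.cells = foo_cells;
 *	config.ncells = ARRAY_SIZE(foo_cells);
 */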

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
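
/*
 * Illustrative sketch (hypothetical foo_* names): a consumer reacting to
 * nvmem events.  The chain passes NVMEM_ADD/NVMEM_REMOVE with a
 * struct nvmem_device * and NVMEM_CELL_ADD/NVMEM_CELL_REMOVE with a
 * struct nvmem_cell * in @data.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */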

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
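
/*
 * Illustrative device-tree sketch (hypothetical labels) of the layout
 * nvmem_add_cells_from_of() parses: each child of the provider node is a
 * cell, "reg" gives <offset length> in bytes, and the optional "bits"
 * property gives <bit_offset nbits> within that range.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		mac_address: mac-address@90 {
 *			reg = <0x90 0x6>;
 *		};
 *
 *		board_rev: board-rev@a0 {
 *			reg = <0xa0 0x1>;
 *			bits = <4 4>;
 *		};
 *	};
 */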

/**
 * nvmem_register() - Register an nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Record the id now so the error paths below free the right one. */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
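
/*
 * Illustrative provider sketch (hypothetical foo_* names): the minimal
 * wiring a driver needs before calling nvmem_register().  The reg_read
 * callback receives the provider's priv pointer plus a byte offset and
 * length.
 *
 *	static int foo_reg_read(void *priv, unsigned int off, void *val,
 *				size_t bytes)
 *	{
 *		struct foo_chip *chip = priv;
 *
 *		return foo_chip_read(chip, off, val, bytes);
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = &pdev->dev,
 *		.name = "foo-efuse",
 *		.id = NVMEM_DEVID_AUTO,
 *		.read_only = true,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 4,
 *		.reg_read = foo_reg_read,
 *		.priv = chip,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */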

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
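
/*
 * Illustrative consumer sketch (hypothetical node/driver): with a DT link
 * such as
 *
 *	foo {
 *		nvmem = <&efuse>;
 *		nvmem-names = "calibration";
 *	};
 *
 * the whole device can be obtained and read directly:
 *
 *	struct nvmem_device *nvmem;
 *	u8 data[16];
 *
 *	nvmem = nvmem_device_get(dev, "calibration");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *	nvmem_device_read(nvmem, 0, sizeof(data), data);
 *	nvmem_device_put(nvmem);
 */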

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
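
/*
 * Illustrative consumer sketch (hypothetical node/driver): referencing a
 * cell from DT and reading it.  The returned buffer is a copy, so the cell
 * can be put before the data is consumed.
 *
 *	foo {
 *		nvmem-cells = <&mac_address>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */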

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
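
/*
 * Worked example of the shift above (illustrative): a cell with
 * bit_offset = 2 and nbits = 10 occupies DIV_ROUND_UP(12, 8) = 2 raw
 * bytes.  Byte 0 is shifted right by 2, the low 2 bits of byte 1 are
 * ORed into its top, byte 1 is then shifted right by 2 as well, and the
 * final GENMASK(1, 0) keeps only the remaining 10 - 8 = 2 valid bits in
 * byte 1, leaving the 10-bit value right-aligned in the buffer.
 */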

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
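
/*
 * Illustrative sketch (hypothetical cell): writing a previously obtained
 * cell.  For bitfield cells, nvmem_cell_prepare_write_buffer() above reads
 * back and preserves the surrounding bits.
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
 *	int ret = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (ret != sizeof(mac))
 *		return ret < 0 ? ret : -EIO;
 */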

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
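
/*
 * Illustrative sketch (hypothetical cell name): the sized helpers bundle
 * get/read/put and fail with -EINVAL when the cell's byte length does not
 * match the requested type.
 *
 *	u32 calib;
 *	int ret = nvmem_cell_read_u32(dev, "calibration", &calib);
 *	if (ret)
 *		return ret;
 */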

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	size_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
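
/*
 * Illustrative sketch (hypothetical names): non-DT platforms can describe
 * cells and route them to consumers from board code.  The nvmem_name and
 * dev_id strings must match the provider's and consumer's device names;
 * foo_cells is the hypothetical nvmem_cell_info array sketched earlier.
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-efuse0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-efuse0",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-eth.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */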

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");