// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	struct gpio_desc	*wp_gpio;
	void			*priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
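
/*
 * Illustrative sketch (not part of the core, names and addresses
 * hypothetical): a provider that must hide a region from consumers can
 * describe it with keepouts in its nvmem_config:
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x20, .end = 0x30, .value = 0xff },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 *
 * With this in place, nvmem_access_with_keepouts() fills reads of
 * [0x20, 0x30) with 0xff and silently skips writes to that window.
 */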

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}
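
/*
 * Illustrative sketch (hypothetical names and offsets): a static cell
 * description as a provider might pass to nvmem_add_cells() through its
 * nvmem_config:
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 * If .nbits/.bit_offset are set instead, the conversion above derives
 * .bytes from the bit window via DIV_ROUND_UP().
 */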

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
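
/*
 * Illustrative sketch (hypothetical callback): a consumer can watch for
 * providers coming and going via the notifier chain:
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 *
 * NVMEM_ADD/NVMEM_REMOVE pass a struct nvmem_device * in @data,
 * NVMEM_CELL_ADD/NVMEM_CELL_REMOVE a struct nvmem_cell *.
 */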

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_add(cell);
	}

	return 0;
}
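
/*
 * Illustrative sketch (hypothetical devicetree): the layout
 * nvmem_add_cells_from_of() parses.  Each child node with a "reg"
 * property becomes one cell; "bits" optionally narrows it to a bit
 * window.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		calib: calib@10 {
 *			reg = <0x10 0x2>;	// offset, length in bytes
 *			bits = <4 10>;		// bit_offset, nbits
 *		};
 *	};
 */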

/**
 * nvmem_register() - Register an nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/*
	 * Set the id before any error path that calls ida_free(); a
	 * zero-initialised id would otherwise free the wrong IDA entry.
	 */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval) {
			ida_free(&nvmem_ida, nvmem->id);
			kfree(nvmem);
			return ERR_PTR(rval);
		}
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
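
/*
 * Illustrative sketch (hypothetical driver): minimal provider
 * registration from a probe path, using the devm variant below.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "foo-otp",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.read_only	= true,
 *		.size		= foo->size,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.priv		= foo,
 *		.reg_read	= foo_reg_read,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */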

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for device %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
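
/*
 * Illustrative sketch (hypothetical devicetree): the consumer-side
 * properties of_nvmem_device_get() resolves.
 *
 *	foo@0 {
 *		...
 *		nvmem = <&otp>;
 *		nvmem-names = "calibration";
 *	};
 *
 * matched from C with:
 *
 *	nvmem = nvmem_device_get(dev, "calibration");
 */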

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}
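
/*
 * Illustrative sketch (hypothetical names): on non-DT systems a board
 * file can route a cell to a consumer through the lookup list consulted
 * above.
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name	= "foo-otp0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "foo-eth.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 *
 * nvmem_cell_get(dev, "mac-address") on the "foo-eth.0" device then
 * resolves to the "mac-address" cell of the "foo-otp0" provider.
 */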

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
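
/*
 * Illustrative sketch (hypothetical cell name): the typical consumer
 * flow built on of_nvmem_cell_get() above and nvmem_cell_get() below.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */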

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/*
	 * Clear msb bits if any leftover in the last byte.  Guard against
	 * nbits being a whole number of bytes, where GENMASK(-1, 0) would
	 * be undefined.
	 */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
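
/*
 * Worked example for nvmem_shift_read_buffer_in_place() above, with
 * hypothetical values: a cell with bit_offset = 3 and nbits = 10 spans
 * two raw bytes b0, b1 (cell->bytes = 2).  After the shift, byte 0
 * holds (b0 >> 3) | (b1 << 5), byte 1 holds (b1 >> 3) & 0x3 (the
 * remaining 10 - 8 = 2 valid bits), and no spare bytes are zeroed
 * since DIV_ROUND_UP(10, 8) == 2 == cell->bytes.
 */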

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
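
/*
 * Illustrative sketch (hypothetical cell name): the one-shot pattern
 * the fixed-size helpers above wrap.
 *
 *	u32 calib;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "calibration", &calib);
 *	if (ret)
 *		return ret;
 *
 * The read fails with -EINVAL unless the cell is exactly sizeof(u32)
 * bytes long.
 */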

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
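
/*
 * Illustrative sketch (hypothetical offsets): raw access for consumers
 * that own a whole nvmem device rather than individual cells.
 *
 *	struct nvmem_device *nvmem;
 *	u8 id[4];
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, NULL);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x100, sizeof(id), id);
 *	if (ret != sizeof(id))
 *		return ret < 0 ? ret : -EIO;
 */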

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");