// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	nvmem_cell_post_process_t cell_post_process;
	struct gpio_desc	*wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{

	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}
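/*
 * Example (illustrative, not part of this file): once a provider is
 * registered, its raw content is exposed to userspace through the binary
 * attribute backed by the helpers above, e.g. (hypothetical device name):
 *
 *	hexdump -C /sys/bus/nvmem/devices/qfprom0/nvmem
 *
 * Reads and writes through this file are clamped to the device size and
 * rounded down to a multiple of word_size, as implemented above.
 */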
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell_entry **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_entry_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_entry_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
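/*
 * Example (illustrative sketch, not part of this file): a subsystem that
 * wants to react to nvmem devices or cells coming and going can register
 * a notifier block. The callback name below is hypothetical.
 *
 *	static int my_nvmem_event(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&my_nb);
 */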
/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell_entry *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_entry_add(cell);
	}

	return 0;
}
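/*
 * Example (illustrative devicetree snippet, not from this file): a cell
 * child node as parsed by nvmem_add_cells_from_of() above. "reg" holds
 * <offset length> in bytes; the optional "bits" property holds
 * <bit-offset nbits> for cells that are not byte-aligned.
 *
 *	eeprom@52 {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		mac-address@24 {
 *			reg = <0x24 0x6>;
 *		};
 *	};
 */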
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */

struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->cell_post_process = config->cell_post_process;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ?
			     config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval) {
			ida_free(&nvmem_ida, nvmem->id);
			kfree(nvmem);
			return ERR_PTR(rval);
		}
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
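/*
 * Example (minimal provider sketch, not part of this file; all names
 * below are hypothetical): a driver fills in a struct nvmem_config at
 * probe time and registers it with the managed helper above.
 *
 *	static int my_eeprom_reg_read(void *priv, unsigned int offset,
 *				      void *val, size_t bytes)
 *	{
 *		// copy bytes from the hardware at offset into val
 *		return 0;
 *	}
 *
 *	struct nvmem_config cfg = {
 *		.dev = &pdev->dev,
 *		.name = "my-eeprom",
 *		.id = NVMEM_DEVID_AUTO,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 1,
 *		.reg_read = my_eeprom_reg_read,
 *		.priv = my_priv,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &cfg);
 */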
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{

	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;

	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
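/*
 * Example (illustrative consumer sketch, not part of this file): reading
 * raw bytes from a provider looked up by name. "my-eeprom0" is a
 * hypothetical device name.
 *
 *	struct nvmem_device *nvmem;
 *	u8 serial[8];
 *
 *	nvmem = nvmem_device_get(dev, "my-eeprom0");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	nvmem_device_read(nvmem, 0x10, sizeof(serial), serial);
 *	nvmem_device_put(nvmem);
 */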
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
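/*
 * Example (illustrative consumer sketch, not part of this file): getting
 * a cell by name and reading it. "mac-address" is a hypothetical cell
 * name coming from nvmem-cell-names or a lookup entry.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = devm_nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	// use mac[0..len - 1], then free it
 *	kfree(mac);
 */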
/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (nvmem->cell_post_process) {
		rc = nvmem->cell_post_process(nvmem->priv, id,
					      cell->offset, buf, cell->bytes);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}
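/*
 * Worked example for nvmem_shift_read_buffer_in_place() above (values
 * chosen purely for illustration): a cell with bit_offset = 2 and
 * nbits = 10 occupies DIV_ROUND_UP(10 + 2, 8) = 2 bytes. After the raw
 * read, byte 0 is shifted right by 2, the low 2 bits of byte 1 are
 * folded into bits 7:6 of byte 0, byte 1 is shifted right by 2, and the
 * bits above nbits % 8 = 2 in the final byte are masked off, leaving
 * the 10 valid bits right-aligned in the buffer.
 */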
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
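/*
 * Example (illustrative, not part of this file): the fixed-size helpers
 * above wrap the whole get/read/check-length/put sequence. "calibration"
 * is a hypothetical cell name.
 *
 *	u32 cal;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "calibration", &cal);
 *	if (ret)
 *		return ret;
 */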
static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
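/*
 * Example (illustrative board-file sketch, not part of this file): on
 * non-DT systems, cells are wired to consumer devices through lookup
 * entries. All names below are hypothetical.
 *
 *	static struct nvmem_cell_lookup board_lookups[] = {
 *		{
 *			.nvmem_name = "my-eeprom0",
 *			.cell_name = "mac-address",
 *			.dev_id = "my-ethernet.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 */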
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");