/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
        const char *name;
        struct module *owner;
        struct device dev;
        int stride;
        int word_size;
        int ncells;
        int id;
        int users;
        size_t size;
        bool read_only;
        int flags;
        struct bin_attribute eeprom;
        struct device *base_dev;
        nvmem_reg_read_t reg_read;
        nvmem_reg_write_t reg_write;
        void *priv;
};

#define FLAG_COMPAT BIT(0)

struct nvmem_cell {
        const char *name;
        int offset;
        int bytes;
        int bit_offset;
        int nbits;
        struct nvmem_device *nvmem;
        struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                          void *val, size_t bytes)
{
        if (nvmem->reg_read)
                return nvmem->reg_read(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                           void *val, size_t bytes)
{
        if (nvmem->reg_write)
                return nvmem->reg_write(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

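/*
 * Example (illustrative sketch, not part of this file): a provider
 * implements callbacks matching nvmem_reg_read_t/nvmem_reg_write_t,
 * which the helpers above dispatch to with the provider's priv pointer.
 * The foo_* names and the shadow buffer are hypothetical.
 *
 *      static int foo_reg_read(void *priv, unsigned int offset,
 *                              void *val, size_t bytes)
 *      {
 *              struct foo_chip *foo = priv;
 *
 *              memcpy(val, foo->shadow + offset, bytes);
 *              return 0;
 *      }
 */
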
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *attr,
                                   char *buf, loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = container_of(kobj, struct device, kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from reading */
        if (pos >= nvmem->size)
                return 0;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        rc = nvmem_reg_read(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = container_of(kobj, struct device, kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from writing */
        if (pos >= nvmem->size)
                return -EFBIG;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        rc = nvmem_reg_write(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
        .attr = {
                .name = "nvmem",
                .mode = S_IWUSR | S_IRUGO,
        },
        .read = bin_attr_nvmem_read,
        .write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
        &bin_attr_rw_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
        .bin_attrs = nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
        &nvmem_bin_rw_group,
        NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
        .attr = {
                .name = "nvmem",
                .mode = S_IRUGO,
        },
        .read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
        &bin_attr_ro_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
        .bin_attrs = nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
        &nvmem_bin_ro_group,
        NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
        .attr = {
                .name = "nvmem",
                .mode = S_IWUSR | S_IRUSR,
        },
        .read = bin_attr_nvmem_read,
        .write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
        &bin_attr_rw_root_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
        .bin_attrs = nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
        &nvmem_bin_rw_root_group,
        NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
        .attr = {
                .name = "nvmem",
                .mode = S_IRUSR,
        },
        .read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
        &bin_attr_ro_root_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
        .bin_attrs = nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
        &nvmem_bin_ro_root_group,
        NULL,
};

static void nvmem_release(struct device *dev)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        ida_simple_remove(&nvmem_ida, nvmem->id);
        kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
        .release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
        .name = "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
        return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
        struct device *d;

        if (!nvmem_np)
                return NULL;

        d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
        struct nvmem_cell *p;

        mutex_lock(&nvmem_cells_mutex);

        list_for_each_entry(p, &nvmem_cells, node)
                if (!strcmp(p->name, cell_id)) {
                        mutex_unlock(&nvmem_cells_mutex);
                        return p;
                }

        mutex_unlock(&nvmem_cells_mutex);

        return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
        mutex_lock(&nvmem_cells_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_cells_mutex);
        kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
        struct nvmem_cell *cell;
        struct list_head *p, *n;

        list_for_each_safe(p, n, &nvmem_cells) {
                cell = list_entry(p, struct nvmem_cell, node);
                if (cell->nvmem == nvmem)
                        nvmem_cell_drop(cell);
        }
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
        mutex_lock(&nvmem_cells_mutex);
        list_add_tail(&cell->node, &nvmem_cells);
        mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
                                         const struct nvmem_cell_info *info,
                                         struct nvmem_cell *cell)
{
        cell->nvmem = nvmem;
        cell->offset = info->offset;
        cell->bytes = info->bytes;
        cell->name = info->name;

        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name, nvmem->stride);
                return -EINVAL;
        }

        return 0;
}

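/*
 * Worked example for the sizing logic above (assumed numbers): a cell
 * described with nbits = 10 and bit_offset = 6 spans
 * DIV_ROUND_UP(10 + 6, 8) = 2 bytes, and its byte offset must be a
 * multiple of the device's stride or the cell is rejected.
 */
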
static int nvmem_add_cells(struct nvmem_device *nvmem,
                           const struct nvmem_config *cfg)
{
        struct nvmem_cell **cells;
        const struct nvmem_cell_info *info = cfg->cells;
        int i, rval;

        cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
        if (!cells)
                return -ENOMEM;

        for (i = 0; i < cfg->ncells; i++) {
                cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
                if (!cells[i]) {
                        rval = -ENOMEM;
                        goto err;
                }

                rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
                if (rval) {
                        kfree(cells[i]);
                        goto err;
                }

                nvmem_cell_add(cells[i]);
        }

        nvmem->ncells = cfg->ncells;
        /* remove tmp array */
        kfree(cells);

        return 0;
err:
        while (i--)
                nvmem_cell_drop(cells[i]);

        kfree(cells);

        return rval;
}

/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to stay backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
                              const struct nvmem_config *config)
{
        int rval;

        if (!config->base_dev)
                return -EINVAL;

        if (nvmem->read_only)
                nvmem->eeprom = bin_attr_ro_root_nvmem;
        else
                nvmem->eeprom = bin_attr_rw_root_nvmem;
        nvmem->eeprom.attr.name = "eeprom";
        nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
        nvmem->eeprom.private = &nvmem->dev;
        nvmem->base_dev = config->base_dev;

        rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
        if (rval) {
                dev_err(&nvmem->dev,
                        "Failed to create eeprom binary file %d\n", rval);
                return rval;
        }

        nvmem->flags |= FLAG_COMPAT;

        return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which the nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        int rval;

        if (!config->dev)
                return ERR_PTR(-EINVAL);

        nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
        if (!nvmem)
                return ERR_PTR(-ENOMEM);

        rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
        if (rval < 0) {
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        nvmem->id = rval;
        nvmem->owner = config->owner;
        if (!nvmem->owner && config->dev->driver)
                nvmem->owner = config->dev->driver->owner;
        nvmem->stride = config->stride ?: 1;
        nvmem->word_size = config->word_size ?: 1;
        nvmem->size = config->size;
        nvmem->dev.type = &nvmem_provider_type;
        nvmem->dev.bus = &nvmem_bus_type;
        nvmem->dev.parent = config->dev;
        nvmem->priv = config->priv;
        nvmem->reg_read = config->reg_read;
        nvmem->reg_write = config->reg_write;
        nvmem->dev.of_node = config->dev->of_node;

        if (config->id == -1 && config->name) {
                dev_set_name(&nvmem->dev, "%s", config->name);
        } else {
                dev_set_name(&nvmem->dev, "%s%d",
                             config->name ? : "nvmem",
                             config->name ? config->id : nvmem->id);
        }

        nvmem->read_only = device_property_present(config->dev, "read-only") |
                           config->read_only;

        if (config->root_only)
                nvmem->dev.groups = nvmem->read_only ?
                        nvmem_ro_root_dev_groups :
                        nvmem_rw_root_dev_groups;
        else
                nvmem->dev.groups = nvmem->read_only ?
                        nvmem_ro_dev_groups :
                        nvmem_rw_dev_groups;

        device_initialize(&nvmem->dev);

        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

        rval = device_add(&nvmem->dev);
        if (rval)
                goto err_put_device;

        if (config->compat) {
                rval = nvmem_setup_compat(nvmem, config);
                if (rval)
                        goto err_device_del;
        }

        if (config->cells)
                nvmem_add_cells(nvmem, config);

        return nvmem;

err_device_del:
        device_del(&nvmem->dev);
err_put_device:
        put_device(&nvmem->dev);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

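/*
 * Example (illustrative sketch, not part of this file): minimal provider
 * registration from a platform driver's probe path. The foo_* names are
 * hypothetical; the callbacks match the sketch near the top of this file.
 *
 *      static struct nvmem_config foo_nvmem_config = {
 *              .name = "foo-nvmem",
 *              .owner = THIS_MODULE,
 *              .word_size = 1,
 *              .stride = 1,
 *              .size = 256,
 *              .reg_read = foo_reg_read,
 *              .reg_write = foo_reg_write,
 *      };
 *
 *      struct nvmem_device *nvmem;
 *
 *      foo_nvmem_config.dev = &pdev->dev;
 *      foo_nvmem_config.priv = foo;
 *      nvmem = nvmem_register(&foo_nvmem_config);
 *      if (IS_ERR(nvmem))
 *              return PTR_ERR(nvmem);
 */
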
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be a negative error code on failure or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
        mutex_lock(&nvmem_mutex);
        if (nvmem->users) {
                mutex_unlock(&nvmem_mutex);
                return -EBUSY;
        }
        mutex_unlock(&nvmem_mutex);

        if (nvmem->flags & FLAG_COMPAT)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

        nvmem_device_remove_all_cells(nvmem);
        device_del(&nvmem->dev);
        put_device(&nvmem->dev);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
        WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device the nvmem device's lifetime is bound to.
 * @config: nvmem device configuration with which the nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
                                         const struct nvmem_config *config)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_register(config);

        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **r = res;

        return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device the nvmem device was registered with.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be a negative error code on failure or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
        return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

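/*
 * Example (illustrative sketch): the managed variant ties the nvmem
 * lifetime to the provider device, so the remove and error paths need
 * no explicit nvmem_unregister() call. Names are hypothetical, as in
 * the registration sketch above.
 *
 *      nvmem = devm_nvmem_register(&pdev->dev, &foo_nvmem_config);
 *      if (IS_ERR(nvmem))
 *              return PTR_ERR(nvmem);
 */
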
static struct nvmem_device *__nvmem_device_get(struct device_node *np,
                                               struct nvmem_cell **cellp,
                                               const char *cell_id)
{
        struct nvmem_device *nvmem = NULL;

        mutex_lock(&nvmem_mutex);

        if (np) {
                nvmem = of_nvmem_find(np);
                if (!nvmem) {
                        mutex_unlock(&nvmem_mutex);
                        return ERR_PTR(-EPROBE_DEFER);
                }
        } else {
                struct nvmem_cell *cell = nvmem_find_cell(cell_id);

                if (cell) {
                        nvmem = cell->nvmem;
                        *cellp = cell;
                }

                if (!nvmem) {
                        mutex_unlock(&nvmem_mutex);
                        return ERR_PTR(-ENOENT);
                }
        }

        nvmem->users++;
        mutex_unlock(&nvmem_mutex);

        if (!try_module_get(nvmem->owner)) {
                dev_err(&nvmem->dev,
                        "could not increase module refcount for cell %s\n",
                        nvmem->name);

                mutex_lock(&nvmem_mutex);
                nvmem->users--;
                mutex_unlock(&nvmem_mutex);

                return ERR_PTR(-EINVAL);
        }

        return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
        module_put(nvmem->owner);
        mutex_lock(&nvmem_mutex);
        nvmem->users--;
        mutex_unlock(&nvmem_mutex);
}

static struct nvmem_device *nvmem_find(const char *name)
{
        struct device *d;

        d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
        struct device_node *nvmem_np;
        int index;

        index = of_property_match_string(np, "nvmem-names", id);

        nvmem_np = of_parse_phandle(np, "nvmem", index);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
        if (dev->of_node) { /* try dt first */
                struct nvmem_device *nvmem;

                nvmem = of_nvmem_device_get(dev->of_node, dev_name);

                if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
                        return nvmem;
        }

        return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

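/*
 * Example (illustrative consumer sketch, "foo-nvmem" is a hypothetical
 * name): when the consumer has an of_node, the lookup goes through the
 * "nvmem" and "nvmem-names" properties; otherwise it falls back to a
 * lookup by device name on the nvmem bus.
 *
 *      struct nvmem_device *ndev = nvmem_device_get(dev, "foo-nvmem");
 *
 *      if (IS_ERR(ndev))
 *              return PTR_ERR(ndev);
 *      ...
 *      nvmem_device_put(ndev);
 */
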
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_device_release,
                             devm_nvmem_device_match, nvmem);

        WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device for a device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_device_get(dev, id);
        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
        struct nvmem_cell *cell = NULL;
        struct nvmem_device *nvmem;

        nvmem = __nvmem_device_get(NULL, &cell, cell_id);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        return cell;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *        for the cell at index 0 (the lone cell with no accompanying
 *        nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
                                     const char *name)
{
        struct device_node *cell_np, *nvmem_np;
        struct nvmem_cell *cell;
        struct nvmem_device *nvmem;
        const __be32 *addr;
        int rval, len;
        int index = 0;

        /* if cell name exists, find index to the name */
        if (name)
                index = of_property_match_string(np, "nvmem-cell-names", name);

        cell_np = of_parse_phandle(np, "nvmem-cells", index);
        if (!cell_np)
                return ERR_PTR(-EINVAL);

        nvmem_np = of_get_next_parent(cell_np);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
        of_node_put(nvmem_np);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        addr = of_get_property(cell_np, "reg", &len);
        if (!addr || (len < 2 * sizeof(u32))) {
                dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
                        cell_np);
                rval = -EINVAL;
                goto err_mem;
        }

        cell = kzalloc(sizeof(*cell), GFP_KERNEL);
        if (!cell) {
                rval = -ENOMEM;
                goto err_mem;
        }

        cell->nvmem = nvmem;
        cell->offset = be32_to_cpup(addr++);
        cell->bytes = be32_to_cpup(addr);
        cell->name = cell_np->name;

        addr = of_get_property(cell_np, "bits", &len);
        if (addr && len == (2 * sizeof(u32))) {
                cell->bit_offset = be32_to_cpup(addr++);
                cell->nbits = be32_to_cpup(addr);
        }

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name, nvmem->stride);
                rval = -EINVAL;
                goto err_sanity;
        }

        nvmem_cell_add(cell);

        return cell;

err_sanity:
        kfree(cell);

err_mem:
        __nvmem_device_put(nvmem);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

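/*
 * Example (illustrative device-tree sketch matching the parsing above;
 * node and cell names are hypothetical): the cell's "reg" property is
 * <byte-offset byte-length> and the optional "bits" property is
 * <bit-offset nbits> within that range.
 *
 *      mac_addr: mac-address@10 {
 *              reg = <0x10 0x6>;
 *      };
 *
 * A consumer node referencing it with
 * nvmem-cells = <&mac_addr>; nvmem-cell-names = "mac-address";
 * can then resolve the cell with:
 *
 *      cell = of_nvmem_cell_get(np, "mac-address");
 */
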
/**
 * nvmem_cell_get() - Get an nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
        struct nvmem_cell *cell;

        if (dev->of_node) { /* try dt first */
                cell = of_nvmem_cell_get(dev->of_node, cell_id);
                if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
                        return cell;
        }

        return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell of a device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell **ptr, *cell;

        ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        cell = nvmem_cell_get(dev, id);
        if (!IS_ERR(cell)) {
                *ptr = cell;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_cell_release,
                             devm_nvmem_cell_match, cell);

        WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
        struct nvmem_device *nvmem = cell->nvmem;

        __nvmem_device_put(nvmem);
        nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
        u8 *p, *b;
        int i, bit_offset = cell->bit_offset;

        p = b = buf;
        if (bit_offset) {
                /* First shift */
                *b++ >>= bit_offset;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get bits from next byte and shift them towards msb */
                        *p |= *b << (BITS_PER_BYTE - bit_offset);

                        p = b;
                        *b++ >>= bit_offset;
                }

                /* result fits in fewer bytes */
                if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
                        *p-- = 0;
        }
        /* clear msb bits if any leftover in the last byte */
        *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

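/*
 * Worked example for the in-place shift above (assumed values): for a
 * cell with bit_offset = 2 and nbits = 7 (so cell->bytes = 2), a raw
 * read of {0xa4, 0x01} becomes {0x69, 0x00}: buf[0] is shifted right by
 * two, the low bits of buf[1] are folded into its top bits, the now
 * unused trailing byte is zeroed, and GENMASK(6, 0) clears any leftover
 * msb of the last value byte.
 */
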
static int __nvmem_cell_read(struct nvmem_device *nvmem,
                             struct nvmem_cell *cell,
                             void *buf, size_t *len)
{
        int rc;

        rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

        if (rc)
                return rc;

        /* shift bits in-place */
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);

        if (len)
                *len = cell->bytes;

        return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *       can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        u8 *buf;
        int rc;

        if (!nvmem)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        rc = __nvmem_cell_read(nvmem, cell, buf, len);
        if (rc) {
                kfree(buf);
                return ERR_PTR(rc);
        }

        return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

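/*
 * Example (illustrative consumer sketch; "calibration" is a hypothetical
 * cell name): read a cell and release everything afterwards.
 *
 *      struct nvmem_cell *cell;
 *      size_t len;
 *      u8 *data;
 *
 *      cell = nvmem_cell_get(dev, "calibration");
 *      if (IS_ERR(cell))
 *              return PTR_ERR(cell);
 *
 *      data = nvmem_cell_read(cell, &len);
 *      if (IS_ERR(data)) {
 *              nvmem_cell_put(cell);
 *              return PTR_ERR(data);
 *      }
 *      ...
 *      kfree(data);
 *      nvmem_cell_put(cell);
 */
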
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
                                             u8 *_buf, int len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int i, rc, nbits, bit_offset = cell->bit_offset;
        u8 v, *p, *buf, *b, pbyte, pbits;

        nbits = cell->nbits;
        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        memcpy(buf, _buf, len);
        p = b = buf;

        if (bit_offset) {
                pbyte = *b;
                *b <<= bit_offset;

                /* setup the first byte with lsb bits from nvmem */
                rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
                if (rc)
                        goto err;
                *b++ |= GENMASK(bit_offset - 1, 0) & v;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get last byte bits and shift them towards lsb */
                        pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
                        pbyte = *b;
                        p = b;
                        *b <<= bit_offset;
                        *b++ |= pbits;
                }
        }

        /* if the cell does not end on a byte boundary */
        if ((nbits + bit_offset) % BITS_PER_BYTE) {
                /* setup the last byte with msb bits from nvmem */
                rc = nvmem_reg_read(nvmem,
                                    cell->offset + cell->bytes - 1, &v, 1);
                if (rc)
                        goto err;
                *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
        }

        return buf;

err:
        kfree(buf);
        return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int rc;

        if (!nvmem || nvmem->read_only ||
            (cell->bit_offset == 0 && len != cell->bytes))
                return -EINVAL;

        if (cell->bit_offset || cell->nbits) {
                buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);
        }

        rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

        /* free the tmp buffer */
        if (cell->bit_offset || cell->nbits)
                kfree(buf);

        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

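/*
 * Example (illustrative sketch; the six-byte "mac" buffer is
 * hypothetical): for byte-aligned cells, @len must match the cell size
 * exactly, as checked above.
 *
 *      u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *      int rc = nvmem_cell_write(cell, mac, sizeof(mac));
 *
 *      if (rc < 0)
 *              return rc;
 */
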
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, cell_id);
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);
        if (IS_ERR(buf)) {
                nvmem_cell_put(cell);
                return PTR_ERR(buf);
        }
        if (len != sizeof(*val)) {
                kfree(buf);
                nvmem_cell_put(cell);
                return -EINVAL;
        }
        memcpy(val, buf, sizeof(*val));

        kfree(buf);
        nvmem_cell_put(cell);
        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
                               struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;
        size_t len;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
        if (rc)
                return rc;

        rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
                            struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
        if (rc)
                return rc;

        return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
                      unsigned int offset,
                      size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_read(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

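/*
 * Example (illustrative sketch; offset and size are hypothetical): raw
 * device-level access, bypassing any cell layout.
 *
 *      u8 serial[16];
 *      int rc = nvmem_device_read(ndev, 0x100, sizeof(serial), serial);
 *
 *      if (rc < 0)
 *              return rc;
 */
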
/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
                       unsigned int offset,
                       size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_write(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
        return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
        bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");