// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
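
/*
 * Worked example (illustrative, not part of the driver): a cell described
 * with info->bit_offset = 2 and info->nbits = 10 spans raw bits 2..11, so
 * the computation above sizes it as DIV_ROUND_UP(10 + 2, 8) = 2 bytes of
 * backing storage, regardless of any info->bytes the caller supplied.
 */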

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
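
/*
 * The scanner below expects one child node per cell under the provider's
 * device tree node, per the nvmem binding. A hypothetical layout (node and
 * label names are illustrative):
 *
 *	efuse@1000 {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		mac_address: mac-address@10 {
 *			reg = <0x10 0x6>;
 *		};
 *
 *		soc_rev: soc-rev@20 {
 *			reg = <0x20 0x1>;
 *			bits = <2 4>;	(bit_offset 2, nbits 4)
 *		};
 *	};
 */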

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
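
/*
 * Provider sketch (hypothetical, error handling trimmed): a driver exposing
 * a 256-byte read-only eFuse via the managed variant below. The foo_* names
 * are illustrative; foo is driver state set up earlier in probe.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-efuse",
 *			.id = -1,
 *			.read_only = true,
 *			.size = 256,
 *			.reg_read = foo_reg_read,
 *			.priv = foo,
 *		};
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
 *	}
 */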

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
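
/*
 * Hypothetical consumer node for the device-level getters above (the node
 * and label names are illustrative); the "nvmem" and "nvmem-names"
 * properties are the ones parsed by of_nvmem_device_get():
 *
 *	tsens {
 *		nvmem = <&efuse>;
 *		nvmem-names = "calibration";
 *	};
 *
 * The driver would then call nvmem_device_get(dev, "calibration").
 */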

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 *	   that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
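
/*
 * Machine-code sketch (hypothetical names) wiring a cell to a consumer on a
 * non-DT system; entries like this are what the lookup loop below matches
 * against dev_name() and the requested con_id:
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name = "foo-efuse0",
 *		.cell_name = "mac-address",
 *		.dev_id = "foo-net.0",
 *		.con_id = "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */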

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
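
/*
 * Typical consumer sequence (hypothetical, error handling elided for
 * brevity); the read buffer is kmalloc'ed and owned by the caller:
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "mac-address");
 *	size_t len;
 *	u8 *mac = nvmem_cell_read(cell, &len);
 *
 *	... use mac[0..len - 1] ...
 *
 *	kfree(mac);
 *	nvmem_cell_put(cell);
 */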

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
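
/*
 * Worked example (illustrative): with bit_offset = 2 and nbits = 10 the
 * cell spans cell->bytes = 2 raw bytes. For raw bytes { 0xab, 0xcd } the
 * shift above produces { 0x6a, 0x03 }, i.e. bits 2..11 of the little-endian
 * raw value 0xcdab: (0xcdab >> 2) & 0x3ff = 0x36a.
 */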

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
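
/*
 * Write-side counterpart (hypothetical usage): for a byte-aligned cell,
 * @len must equal cell->bytes and the success return value is @len:
 *
 *	u8 val[2] = { 0x12, 0x34 };
 *	int ret = nvmem_cell_write(cell, val, sizeof(val));
 */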

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	size_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
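
/*
 * The two helpers above take an ad-hoc cell description instead of a
 * registered cell. A hypothetical sketch reading an 8-byte serial number:
 *
 *	struct nvmem_cell_info info = {
 *		.name = "serial",
 *		.offset = 0x40,
 *		.bytes = 8,
 *	};
 *	u8 serial[8];
 *	ssize_t ret = nvmem_device_cell_read(nvmem, &info, serial);
 */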

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
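
/*
 * Board-file sketch (hypothetical names) describing cells for a named nvmem
 * device; nvmem_add_cells_from_table() picks the table up when the matching
 * provider registers:
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x10, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table foo_table = {
 *		.nvmem_name = "foo-efuse0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_table);
 */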

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");