// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
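
/*
 * Sizing example for bit-level cells: with bit_offset = 6 and nbits = 10
 * the byte window computed above is DIV_ROUND_UP(6 + 10, 8) = 2 bytes,
 * so both partially used edge bytes stay inside the cell.
 */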

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
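
		/*
		 * Decode <offset size> (in bytes) from "reg"; a
		 * hypothetical provider child node handled here:
		 *
		 *	mac-address@24 {
		 *		reg = <0x24 0x6>;
		 *	};
		 *
		 * An optional "bits" property (<bit_offset nbits>)
		 * narrows the cell to a bit-level window.
		 */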
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}
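
	/*
	 * The device is treated as read-only if the firmware declares it
	 * (a "read-only" device property), if the config requests it, or
	 * if no reg_write hook was supplied.
	 */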
	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}
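
/*
 * Provider-side sketch for the managed API (all "foo" names are
 * hypothetical; only fields consumed by this file are shown):
 *
 *	static struct nvmem_config foo_config = {
 *		.name = "foo-nvmem",
 *		.dev = &pdev->dev,
 *		.size = 256,
 *		.reg_read = foo_reg_read,
 *		.reg_write = foo_reg_write,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &foo_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */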

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
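
/*
 * Consumer-side sketch (names are illustrative; "foo-nvmem0" assumes a
 * provider registered under that name):
 *
 *	nvmem = nvmem_device_get(dev, "foo-nvmem0");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *	rc = nvmem_device_read(nvmem, 0, sizeof(id), &id);
 *	nvmem_device_put(nvmem);
 */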

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
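
/*
 * Non-DT consumers are wired to cells through lookup entries registered
 * by board code (a sketch; all names below are hypothetical):
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cell_name = "mac-address",
 *		.dev_id = "foo-eth.0",
 *		.con_id = "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */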

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
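
/*
 * Consumer-side cell sketch ("calibration" is an illustrative name from
 * nvmem-cell-names or a lookup con_id):
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	buf = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */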

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
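
/*
 * Worked read example: for a cell with bit_offset = 3, nbits = 4 and
 * bytes = 1, a raw byte 0b01011000 is shifted in place to 0b00001011
 * and then masked with GENMASK(3, 0), yielding the 4-bit value 0xb.
 */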

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the write does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
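
/*
 * Worked write example: for the same bit_offset = 3, nbits = 4 cell,
 * writing 0xb shifts the buffer to 0b01011000 and merges back the
 * stored byte's untouched bits (2..0 and 7), read via nvmem_reg_read(),
 * so only the cell's bit window is modified.
 */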

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
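
/*
 * Ad-hoc cell access sketch through the device interface (field values
 * are illustrative only):
 *
 *	struct nvmem_cell_info info = {
 *		.name = "serial-number",
 *		.offset = 0x10,
 *		.bytes = 8,
 *	};
 *
 *	len = nvmem_device_cell_read(nvmem, &info, buf);
 */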

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
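
/*
 * Board-code sketch for registering static cell definitions (names and
 * offsets are illustrative):
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x24, .bytes = 6 },
 *	};
 *	static struct nvmem_cell_table foo_table = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_table);
 */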

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");