/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			ncells;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void *priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}
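/*
 * Worked example (illustrative, not part of the code above): with
 * word_size = 4 and size = 1024, a userspace read of count = 10 at
 * pos = 1020 is first clamped to 4 bytes and then rounded down to the
 * word size, so exactly one full word is returned. The attribute is
 * exposed as /sys/bus/nvmem/devices/<dev-name>/nvmem, e.g.:
 *
 *	hexdump -C /sys/bus/nvmem/devices/foo-eeprom0/nvmem
 *
 * where "foo-eeprom0" is a hypothetical device name.
 */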
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	list_for_each_entry(p, &nvmem_cells, node)
		if (p && !strcmp(p->name, cell_id))
			return p;

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_config *cfg)
{
	struct nvmem_cell **cells;
	const struct nvmem_cell_info *info = cfg->cells;
	int i, rval;

	cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < cfg->ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	nvmem->ncells = cfg->ncells;
	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
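/*
 * Sketch (hypothetical provider data, for illustration): the table
 * consumed by nvmem_add_cells() arrives via nvmem_config::cells and
 * nvmem_config::ncells, e.g.:
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
 *		{ .name = "board-rev", .offset = 0x50, .bytes = 1,
 *		  .bit_offset = 2, .nbits = 4 },
 *	};
 *
 * Offsets must respect the device's stride; for the second cell the
 * effective size is recomputed as DIV_ROUND_UP(4 + 2, 8) = 1 byte.
 */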
/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	struct device_node *np;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->owner = config->owner;
	nvmem->stride = config->stride;
	nvmem->word_size = config->word_size;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	np = config->dev->of_node;
	nvmem->dev.of_node = np;
	dev_set_name(&nvmem->dev, "%s%d",
		     config->name ? : "nvmem",
		     config->name ? config->id : nvmem->id);

	nvmem->read_only = of_property_read_bool(np, "read-only") |
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto out;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto out;
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config);

	return nvmem;
out:
	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
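/*
 * Provider sketch (hypothetical driver, for illustration only; the
 * "foo" names and callbacks are made up):
 *
 *	static struct nvmem_config econfig = {
 *		.name = "foo-eeprom",
 *		.owner = THIS_MODULE,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 1,
 *		.reg_read = foo_reg_read,
 *		.reg_write = foo_reg_write,
 *	};
 *
 *	econfig.dev = &pdev->dev;
 *	econfig.priv = foo;
 *	nvmem = nvmem_register(&econfig);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */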
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be a negative error code on failure or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem->name);

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static int nvmem_match(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
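/*
 * Consumer sketch (illustrative; "foo-eeprom0" is a hypothetical device
 * name, formed from nvmem_config::name plus an id by dev_set_name() in
 * nvmem_register() above):
 *
 *	struct nvmem_device *ndev;
 *
 *	ndev = nvmem_device_get(dev, "foo-eeprom0");
 *	if (IS_ERR(ndev))
 *		return PTR_ERR(ndev);
 *	...
 *	nvmem_device_put(ndev);
 */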
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}

#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *	  for the cell at index 0 (the lone cell with no accompanying
 *	  nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (name)
		index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
			cell_np->full_name);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
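/*
 * Device tree layout consumed by of_nvmem_cell_get() (illustrative
 * snippet; node and label names are made up):
 *
 *	eeprom: eeprom@52 {
 *		...
 *		mac_addr: mac-address@90 {
 *			reg = <0x90 6>;
 *		};
 *	};
 *
 *	ethernet {
 *		nvmem-cells = <&mac_addr>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * An optional "bits = <offset nbits>;" property in the cell node feeds
 * cell->bit_offset and cell->nbits above.
 */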
/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
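/*
 * Consumer read sketch (illustrative; the "mac-address" cell is the
 * hypothetical one from the binding example above):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */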
static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
						    void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in fewer bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
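/*
 * Worked example (illustrative): for a cell with bit_offset = 3 and
 * nbits = 5, cell->bytes is DIV_ROUND_UP(5 + 3, 8) = 1. The raw byte is
 * shifted right by 3 and then masked with
 * GENMASK(5 % 8 - 1, 0) = 0x1f, i.e. buf[0] = (raw >> 3) & 0x1f.
 */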
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
						    u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;

err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
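/*
 * Write sketch (illustrative): for a byte-aligned cell (bit_offset == 0)
 * the buffer must cover the whole cell, so len must equal cell->bytes:
 *
 *	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };  (made-up data)
 *
 *	rc = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (rc < 0)
 *		return rc;
 */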
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	size_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
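/*
 * Ad-hoc access sketch (illustrative): with a handle from
 * nvmem_device_get(), a caller-provided nvmem_cell_info describes a
 * cell on the fly, without registering it:
 *
 *	struct nvmem_cell_info info = {
 *		.name = "serial-no",	(hypothetical cell)
 *		.offset = 0x10,
 *		.bytes = 4,
 *	};
 *	u8 serial[4];
 *	ssize_t rc;
 *
 *	rc = nvmem_device_cell_read(ndev, &info, serial);
 *	if (rc < 0)
 *		return rc;
 */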
/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");