/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct regmap		*regmap;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			ncells;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
};

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int rc;

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = regmap_raw_read(nvmem->regmap, pos, buf, count);

	if (IS_ERR_VALUE(rc))
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int rc;

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = regmap_raw_write(nvmem->regmap, pos, buf, count);

	if (IS_ERR_VALUE(rc))
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};
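/*
 * Illustrative sketch (an assumption, not part of the original driver): the
 * attribute above surfaces the raw device at
 * /sys/bus/nvmem/devices/<dev-name>/nvmem.  A userspace reader could look
 * roughly like this, where the device name "foo-efuse0" and use_buf() are
 * hypothetical.  Reads shorter than word_size fail with -EINVAL, and the
 * count is rounded down to a multiple of word_size:
 *
 *	unsigned char buf[16];
 *	int fd = open("/sys/bus/nvmem/devices/foo-efuse0/nvmem", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		if (pread(fd, buf, sizeof(buf), 0) > 0)
 *			use_buf(buf);
 *		close(fd);
 *	}
 */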
/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	list_for_each_entry(p, &nvmem_cells, node)
		if (p && !strcmp(p->name, cell_id))
			return p;

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
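/*
 * Illustrative sketch (an assumption, not from the original file): a provider
 * that wants cells registered by nvmem_add_cells() below describes them with
 * an nvmem_cell_info table in its nvmem_config.  The foo_* names and offsets
 * are hypothetical; each offset must be aligned to the regmap stride, as
 * checked by nvmem_cell_info_to_nvmem_cell() above:
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x10,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	foo_nvmem_config.cells = foo_cells;
 *	foo_nvmem_config.ncells = ARRAY_SIZE(foo_cells);
 */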
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_config *cfg)
{
	struct nvmem_cell **cells;
	const struct nvmem_cell_info *info = cfg->cells;
	int i, rval;

	cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < cfg->ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (IS_ERR_VALUE(rval)) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	nvmem->ncells = cfg->ncells;
	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (--i >= 0)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	struct device_node *np;
	struct regmap *rm;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	rm = dev_get_regmap(config->dev, NULL);
	if (!rm) {
		dev_err(config->dev, "Regmap not found\n");
		return ERR_PTR(-EINVAL);
	}

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->regmap = rm;
	nvmem->owner = config->owner;
	nvmem->stride = regmap_get_reg_stride(rm);
	nvmem->word_size = regmap_get_val_bytes(rm);
	nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	np = config->dev->of_node;
	nvmem->dev.of_node = np;
	dev_set_name(&nvmem->dev, "%s%d",
		     config->name ? : "nvmem", config->id);

	nvmem->read_only = of_property_read_bool(np, "read-only") |
			   config->read_only;

	nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
					       nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval) {
		ida_simple_remove(&nvmem_ida, nvmem->id);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config);

	return nvmem;
}
EXPORT_SYMBOL_GPL(nvmem_register);
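/*
 * Illustrative sketch (an assumption, not from the original file): a provider
 * registers itself once a regmap is set up on its device, since
 * nvmem_register() derives stride, word size and total size from
 * dev_get_regmap().  The foo_* names are hypothetical:
 *
 *	static struct nvmem_config foo_nvmem_config = {
 *		.name	= "foo-efuse",
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_device *nvmem;
 *
 *		foo_nvmem_config.dev = &pdev->dev;
 *		nvmem = nvmem_register(&foo_nvmem_config);
 *		if (IS_ERR(nvmem))
 *			return PTR_ERR(nvmem);
 *
 *		platform_set_drvdata(pdev, nvmem);
 *		return 0;
 *	}
 */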
383 */ 384 int nvmem_unregister(struct nvmem_device *nvmem) 385 { 386 mutex_lock(&nvmem_mutex); 387 if (nvmem->users) { 388 mutex_unlock(&nvmem_mutex); 389 return -EBUSY; 390 } 391 mutex_unlock(&nvmem_mutex); 392 393 nvmem_device_remove_all_cells(nvmem); 394 device_del(&nvmem->dev); 395 396 return 0; 397 } 398 EXPORT_SYMBOL_GPL(nvmem_unregister); 399 400 static struct nvmem_device *__nvmem_device_get(struct device_node *np, 401 struct nvmem_cell **cellp, 402 const char *cell_id) 403 { 404 struct nvmem_device *nvmem = NULL; 405 406 mutex_lock(&nvmem_mutex); 407 408 if (np) { 409 nvmem = of_nvmem_find(np); 410 if (!nvmem) { 411 mutex_unlock(&nvmem_mutex); 412 return ERR_PTR(-EPROBE_DEFER); 413 } 414 } else { 415 struct nvmem_cell *cell = nvmem_find_cell(cell_id); 416 417 if (cell) { 418 nvmem = cell->nvmem; 419 *cellp = cell; 420 } 421 422 if (!nvmem) { 423 mutex_unlock(&nvmem_mutex); 424 return ERR_PTR(-ENOENT); 425 } 426 } 427 428 nvmem->users++; 429 mutex_unlock(&nvmem_mutex); 430 431 if (!try_module_get(nvmem->owner)) { 432 dev_err(&nvmem->dev, 433 "could not increase module refcount for cell %s\n", 434 nvmem->name); 435 436 mutex_lock(&nvmem_mutex); 437 nvmem->users--; 438 mutex_unlock(&nvmem_mutex); 439 440 return ERR_PTR(-EINVAL); 441 } 442 443 return nvmem; 444 } 445 446 static void __nvmem_device_put(struct nvmem_device *nvmem) 447 { 448 module_put(nvmem->owner); 449 mutex_lock(&nvmem_mutex); 450 nvmem->users--; 451 mutex_unlock(&nvmem_mutex); 452 } 453 454 static int nvmem_match(struct device *dev, void *data) 455 { 456 return !strcmp(dev_name(dev), data); 457 } 458 459 static struct nvmem_device *nvmem_find(const char *name) 460 { 461 struct device *d; 462 463 d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match); 464 465 if (!d) 466 return NULL; 467 468 return to_nvmem_device(d); 469 } 470 471 #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) 472 /** 473 * of_nvmem_device_get() - Get nvmem device from a given id 474 * 475 * @dev node: Device tree node that uses the nvmem device 476 * @id: nvmem name from nvmem-names property. 477 * 478 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 479 * on success. 480 */ 481 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) 482 { 483 484 struct device_node *nvmem_np; 485 int index; 486 487 index = of_property_match_string(np, "nvmem-names", id); 488 489 nvmem_np = of_parse_phandle(np, "nvmem", index); 490 if (!nvmem_np) 491 return ERR_PTR(-EINVAL); 492 493 return __nvmem_device_get(nvmem_np, NULL, NULL); 494 } 495 EXPORT_SYMBOL_GPL(of_nvmem_device_get); 496 #endif 497 498 /** 499 * nvmem_device_get() - Get nvmem device from a given id 500 * 501 * @dev : Device that uses the nvmem device 502 * @id: nvmem name from nvmem-names property. 503 * 504 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 505 * on success. 
506 */ 507 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) 508 { 509 if (dev->of_node) { /* try dt first */ 510 struct nvmem_device *nvmem; 511 512 nvmem = of_nvmem_device_get(dev->of_node, dev_name); 513 514 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) 515 return nvmem; 516 517 } 518 519 return nvmem_find(dev_name); 520 } 521 EXPORT_SYMBOL_GPL(nvmem_device_get); 522 523 static int devm_nvmem_device_match(struct device *dev, void *res, void *data) 524 { 525 struct nvmem_device **nvmem = res; 526 527 if (WARN_ON(!nvmem || !*nvmem)) 528 return 0; 529 530 return *nvmem == data; 531 } 532 533 static void devm_nvmem_device_release(struct device *dev, void *res) 534 { 535 nvmem_device_put(*(struct nvmem_device **)res); 536 } 537 538 /** 539 * devm_nvmem_device_put() - put alredy got nvmem device 540 * 541 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), 542 * that needs to be released. 543 */ 544 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) 545 { 546 int ret; 547 548 ret = devres_release(dev, devm_nvmem_device_release, 549 devm_nvmem_device_match, nvmem); 550 551 WARN_ON(ret); 552 } 553 EXPORT_SYMBOL_GPL(devm_nvmem_device_put); 554 555 /** 556 * nvmem_device_put() - put alredy got nvmem device 557 * 558 * @nvmem: pointer to nvmem device that needs to be released. 559 */ 560 void nvmem_device_put(struct nvmem_device *nvmem) 561 { 562 __nvmem_device_put(nvmem); 563 } 564 EXPORT_SYMBOL_GPL(nvmem_device_put); 565 566 /** 567 * devm_nvmem_device_get() - Get nvmem cell of device form a given id 568 * 569 * @dev node: Device tree node that uses the nvmem cell 570 * @id: nvmem name in nvmems property. 571 * 572 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell 573 * on success. The nvmem_cell will be freed by the automatically once the 574 * device is freed. 575 */ 576 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) 577 { 578 struct nvmem_device **ptr, *nvmem; 579 580 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); 581 if (!ptr) 582 return ERR_PTR(-ENOMEM); 583 584 nvmem = nvmem_device_get(dev, id); 585 if (!IS_ERR(nvmem)) { 586 *ptr = nvmem; 587 devres_add(dev, ptr); 588 } else { 589 devres_free(ptr); 590 } 591 592 return nvmem; 593 } 594 EXPORT_SYMBOL_GPL(devm_nvmem_device_get); 595 596 static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id) 597 { 598 struct nvmem_cell *cell = NULL; 599 struct nvmem_device *nvmem; 600 601 nvmem = __nvmem_device_get(NULL, &cell, cell_id); 602 if (IS_ERR(nvmem)) 603 return ERR_CAST(nvmem); 604 605 return cell; 606 } 607 608 #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) 609 /** 610 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id 611 * 612 * @dev node: Device tree node that uses the nvmem cell 613 * @id: nvmem cell name from nvmem-cell-names property. 614 * 615 * Return: Will be an ERR_PTR() on error or a valid pointer 616 * to a struct nvmem_cell. The nvmem_cell will be freed by the 617 * nvmem_cell_put(). 
618 */ 619 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 620 const char *name) 621 { 622 struct device_node *cell_np, *nvmem_np; 623 struct nvmem_cell *cell; 624 struct nvmem_device *nvmem; 625 const __be32 *addr; 626 int rval, len, index; 627 628 index = of_property_match_string(np, "nvmem-cell-names", name); 629 630 cell_np = of_parse_phandle(np, "nvmem-cells", index); 631 if (!cell_np) 632 return ERR_PTR(-EINVAL); 633 634 nvmem_np = of_get_next_parent(cell_np); 635 if (!nvmem_np) 636 return ERR_PTR(-EINVAL); 637 638 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); 639 if (IS_ERR(nvmem)) 640 return ERR_CAST(nvmem); 641 642 addr = of_get_property(cell_np, "reg", &len); 643 if (!addr || (len < 2 * sizeof(u32))) { 644 dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n", 645 cell_np->full_name); 646 rval = -EINVAL; 647 goto err_mem; 648 } 649 650 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 651 if (!cell) { 652 rval = -ENOMEM; 653 goto err_mem; 654 } 655 656 cell->nvmem = nvmem; 657 cell->offset = be32_to_cpup(addr++); 658 cell->bytes = be32_to_cpup(addr); 659 cell->name = cell_np->name; 660 661 addr = of_get_property(cell_np, "bits", &len); 662 if (addr && len == (2 * sizeof(u32))) { 663 cell->bit_offset = be32_to_cpup(addr++); 664 cell->nbits = be32_to_cpup(addr); 665 } 666 667 if (cell->nbits) 668 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, 669 BITS_PER_BYTE); 670 671 if (!IS_ALIGNED(cell->offset, nvmem->stride)) { 672 dev_err(&nvmem->dev, 673 "cell %s unaligned to nvmem stride %d\n", 674 cell->name, nvmem->stride); 675 rval = -EINVAL; 676 goto err_sanity; 677 } 678 679 nvmem_cell_add(cell); 680 681 return cell; 682 683 err_sanity: 684 kfree(cell); 685 686 err_mem: 687 __nvmem_device_put(nvmem); 688 689 return ERR_PTR(rval); 690 } 691 EXPORT_SYMBOL_GPL(of_nvmem_cell_get); 692 #endif 693 694 /** 695 * nvmem_cell_get() - Get nvmem cell of device form a given cell name 696 * 697 * @dev node: Device tree node that uses the nvmem cell 698 * @id: nvmem cell name to get. 699 * 700 * Return: Will be an ERR_PTR() on error or a valid pointer 701 * to a struct nvmem_cell. The nvmem_cell will be freed by the 702 * nvmem_cell_put(). 703 */ 704 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) 705 { 706 struct nvmem_cell *cell; 707 708 if (dev->of_node) { /* try dt first */ 709 cell = of_nvmem_cell_get(dev->of_node, cell_id); 710 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) 711 return cell; 712 } 713 714 return nvmem_cell_get_from_list(cell_id); 715 } 716 EXPORT_SYMBOL_GPL(nvmem_cell_get); 717 718 static void devm_nvmem_cell_release(struct device *dev, void *res) 719 { 720 nvmem_cell_put(*(struct nvmem_cell **)res); 721 } 722 723 /** 724 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id 725 * 726 * @dev node: Device tree node that uses the nvmem cell 727 * @id: nvmem id in nvmem-names property. 728 * 729 * Return: Will be an ERR_PTR() on error or a valid pointer 730 * to a struct nvmem_cell. The nvmem_cell will be freed by the 731 * automatically once the device is freed. 
732 */ 733 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) 734 { 735 struct nvmem_cell **ptr, *cell; 736 737 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); 738 if (!ptr) 739 return ERR_PTR(-ENOMEM); 740 741 cell = nvmem_cell_get(dev, id); 742 if (!IS_ERR(cell)) { 743 *ptr = cell; 744 devres_add(dev, ptr); 745 } else { 746 devres_free(ptr); 747 } 748 749 return cell; 750 } 751 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); 752 753 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) 754 { 755 struct nvmem_cell **c = res; 756 757 if (WARN_ON(!c || !*c)) 758 return 0; 759 760 return *c == data; 761 } 762 763 /** 764 * devm_nvmem_cell_put() - Release previously allocated nvmem cell 765 * from devm_nvmem_cell_get. 766 * 767 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get() 768 */ 769 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) 770 { 771 int ret; 772 773 ret = devres_release(dev, devm_nvmem_cell_release, 774 devm_nvmem_cell_match, cell); 775 776 WARN_ON(ret); 777 } 778 EXPORT_SYMBOL(devm_nvmem_cell_put); 779 780 /** 781 * nvmem_cell_put() - Release previously allocated nvmem cell. 782 * 783 * @cell: Previously allocated nvmem cell by nvmem_cell_get() 784 */ 785 void nvmem_cell_put(struct nvmem_cell *cell) 786 { 787 struct nvmem_device *nvmem = cell->nvmem; 788 789 __nvmem_device_put(nvmem); 790 nvmem_cell_drop(cell); 791 } 792 EXPORT_SYMBOL_GPL(nvmem_cell_put); 793 794 static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, 795 void *buf) 796 { 797 u8 *p, *b; 798 int i, bit_offset = cell->bit_offset; 799 800 p = b = buf; 801 if (bit_offset) { 802 /* First shift */ 803 *b++ >>= bit_offset; 804 805 /* setup rest of the bytes if any */ 806 for (i = 1; i < cell->bytes; i++) { 807 /* Get bits from next byte and shift them towards msb */ 808 *p |= *b << (BITS_PER_BYTE - bit_offset); 809 810 p = b; 811 *b++ >>= bit_offset; 812 } 813 814 /* result fits in less bytes */ 815 if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) 816 *p-- = 0; 817 } 818 /* clear msb bits if any leftover in the last byte */ 819 *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); 820 } 821 822 static int __nvmem_cell_read(struct nvmem_device *nvmem, 823 struct nvmem_cell *cell, 824 void *buf, size_t *len) 825 { 826 int rc; 827 828 rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes); 829 830 if (IS_ERR_VALUE(rc)) 831 return rc; 832 833 /* shift bits in-place */ 834 if (cell->bit_offset || cell->nbits) 835 nvmem_shift_read_buffer_in_place(cell, buf); 836 837 *len = cell->bytes; 838 839 return 0; 840 } 841 842 /** 843 * nvmem_cell_read() - Read a given nvmem cell 844 * 845 * @cell: nvmem cell to be read. 846 * @len: pointer to length of cell which will be populated on successful read. 847 * 848 * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success. 849 * The buffer should be freed by the consumer with a kfree(). 
850 */ 851 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 852 { 853 struct nvmem_device *nvmem = cell->nvmem; 854 u8 *buf; 855 int rc; 856 857 if (!nvmem || !nvmem->regmap) 858 return ERR_PTR(-EINVAL); 859 860 buf = kzalloc(cell->bytes, GFP_KERNEL); 861 if (!buf) 862 return ERR_PTR(-ENOMEM); 863 864 rc = __nvmem_cell_read(nvmem, cell, buf, len); 865 if (IS_ERR_VALUE(rc)) { 866 kfree(buf); 867 return ERR_PTR(rc); 868 } 869 870 return buf; 871 } 872 EXPORT_SYMBOL_GPL(nvmem_cell_read); 873 874 static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, 875 u8 *_buf, int len) 876 { 877 struct nvmem_device *nvmem = cell->nvmem; 878 int i, rc, nbits, bit_offset = cell->bit_offset; 879 u8 v, *p, *buf, *b, pbyte, pbits; 880 881 nbits = cell->nbits; 882 buf = kzalloc(cell->bytes, GFP_KERNEL); 883 if (!buf) 884 return ERR_PTR(-ENOMEM); 885 886 memcpy(buf, _buf, len); 887 p = b = buf; 888 889 if (bit_offset) { 890 pbyte = *b; 891 *b <<= bit_offset; 892 893 /* setup the first byte with lsb bits from nvmem */ 894 rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1); 895 *b++ |= GENMASK(bit_offset - 1, 0) & v; 896 897 /* setup rest of the byte if any */ 898 for (i = 1; i < cell->bytes; i++) { 899 /* Get last byte bits and shift them towards lsb */ 900 pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); 901 pbyte = *b; 902 p = b; 903 *b <<= bit_offset; 904 *b++ |= pbits; 905 } 906 } 907 908 /* if it's not end on byte boundary */ 909 if ((nbits + bit_offset) % BITS_PER_BYTE) { 910 /* setup the last byte with msb bits from nvmem */ 911 rc = regmap_raw_read(nvmem->regmap, 912 cell->offset + cell->bytes - 1, &v, 1); 913 *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; 914 915 } 916 917 return buf; 918 } 919 920 /** 921 * nvmem_cell_write() - Write to a given nvmem cell 922 * 923 * @cell: nvmem cell to be written. 924 * @buf: Buffer to be written. 925 * @len: length of buffer to be written to nvmem cell. 926 * 927 * Return: length of bytes written or negative on failure. 928 */ 929 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) 930 { 931 struct nvmem_device *nvmem = cell->nvmem; 932 int rc; 933 934 if (!nvmem || !nvmem->regmap || nvmem->read_only || 935 (cell->bit_offset == 0 && len != cell->bytes)) 936 return -EINVAL; 937 938 if (cell->bit_offset || cell->nbits) { 939 buf = nvmem_cell_prepare_write_buffer(cell, buf, len); 940 if (IS_ERR(buf)) 941 return PTR_ERR(buf); 942 } 943 944 rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); 945 946 /* free the tmp buffer */ 947 if (cell->bit_offset || cell->nbits) 948 kfree(buf); 949 950 if (IS_ERR_VALUE(rc)) 951 return rc; 952 953 return len; 954 } 955 EXPORT_SYMBOL_GPL(nvmem_cell_write); 956 957 /** 958 * nvmem_device_cell_read() - Read a given nvmem device and cell 959 * 960 * @nvmem: nvmem device to read from. 961 * @info: nvmem cell info to be read. 962 * @buf: buffer pointer which will be populated on successful read. 963 * 964 * Return: length of successful bytes read on success and negative 965 * error code on error. 
966 */ 967 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, 968 struct nvmem_cell_info *info, void *buf) 969 { 970 struct nvmem_cell cell; 971 int rc; 972 ssize_t len; 973 974 if (!nvmem || !nvmem->regmap) 975 return -EINVAL; 976 977 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 978 if (IS_ERR_VALUE(rc)) 979 return rc; 980 981 rc = __nvmem_cell_read(nvmem, &cell, buf, &len); 982 if (IS_ERR_VALUE(rc)) 983 return rc; 984 985 return len; 986 } 987 EXPORT_SYMBOL_GPL(nvmem_device_cell_read); 988 989 /** 990 * nvmem_device_cell_write() - Write cell to a given nvmem device 991 * 992 * @nvmem: nvmem device to be written to. 993 * @info: nvmem cell info to be written 994 * @buf: buffer to be written to cell. 995 * 996 * Return: length of bytes written or negative error code on failure. 997 * */ 998 int nvmem_device_cell_write(struct nvmem_device *nvmem, 999 struct nvmem_cell_info *info, void *buf) 1000 { 1001 struct nvmem_cell cell; 1002 int rc; 1003 1004 if (!nvmem || !nvmem->regmap) 1005 return -EINVAL; 1006 1007 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 1008 if (IS_ERR_VALUE(rc)) 1009 return rc; 1010 1011 return nvmem_cell_write(&cell, buf, cell.bytes); 1012 } 1013 EXPORT_SYMBOL_GPL(nvmem_device_cell_write); 1014 1015 /** 1016 * nvmem_device_read() - Read from a given nvmem device 1017 * 1018 * @nvmem: nvmem device to read from. 1019 * @offset: offset in nvmem device. 1020 * @bytes: number of bytes to read. 1021 * @buf: buffer pointer which will be populated on successful read. 1022 * 1023 * Return: length of successful bytes read on success and negative 1024 * error code on error. 1025 */ 1026 int nvmem_device_read(struct nvmem_device *nvmem, 1027 unsigned int offset, 1028 size_t bytes, void *buf) 1029 { 1030 int rc; 1031 1032 if (!nvmem || !nvmem->regmap) 1033 return -EINVAL; 1034 1035 rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes); 1036 1037 if (IS_ERR_VALUE(rc)) 1038 return rc; 1039 1040 return bytes; 1041 } 1042 EXPORT_SYMBOL_GPL(nvmem_device_read); 1043 1044 /** 1045 * nvmem_device_write() - Write cell to a given nvmem device 1046 * 1047 * @nvmem: nvmem device to be written to. 1048 * @offset: offset in nvmem device. 1049 * @bytes: number of bytes to write. 1050 * @buf: buffer to be written. 1051 * 1052 * Return: length of bytes written or negative error code on failure. 1053 * */ 1054 int nvmem_device_write(struct nvmem_device *nvmem, 1055 unsigned int offset, 1056 size_t bytes, void *buf) 1057 { 1058 int rc; 1059 1060 if (!nvmem || !nvmem->regmap) 1061 return -EINVAL; 1062 1063 rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes); 1064 1065 if (IS_ERR_VALUE(rc)) 1066 return rc; 1067 1068 1069 return bytes; 1070 } 1071 EXPORT_SYMBOL_GPL(nvmem_device_write); 1072 1073 static int __init nvmem_init(void) 1074 { 1075 return bus_register(&nvmem_bus_type); 1076 } 1077 1078 static void __exit nvmem_exit(void) 1079 { 1080 bus_unregister(&nvmem_bus_type); 1081 } 1082 1083 subsys_initcall(nvmem_init); 1084 module_exit(nvmem_exit); 1085 1086 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); 1087 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); 1088 MODULE_DESCRIPTION("nvmem Driver Core"); 1089 MODULE_LICENSE("GPL v2"); 1090