// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
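/*
 * A minimal driver-side sketch of how the range helpers above are used
 * in practice (the "foo" device and its register layout are
 * hypothetical): regmap_check_range_table() rejects anything in
 * no_ranges first, then accepts a register only if it falls in
 * yes_ranges (or if no yes_ranges were supplied at all).
 */
#if 0	/* illustrative only, not built */
static const struct regmap_range foo_wr_yes_ranges[] = {
	regmap_reg_range(0x00, 0x3f),	/* control block */
	regmap_reg_range(0x80, 0x9f),	/* DAC block */
};

static const struct regmap_range foo_wr_no_ranges[] = {
	regmap_reg_range(0x10, 0x11),	/* read-only status */
};

static const struct regmap_access_table foo_wr_table = {
	.yes_ranges = foo_wr_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
	.no_ranges = foo_wr_no_ranges,
	.n_no_ranges = ARRAY_SIZE(foo_wr_no_ranges),
};
/* ...then set .wr_table = &foo_wr_table in the regmap_config. */
#endif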
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}
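/*
 * Worked example for the 24-bit formatter above (an illustration, not
 * kernel-doc): with shift == 0, regmap_format_24(buf, 0x123456, 0)
 * stores big-endian bytes buf[0] = 0x12, buf[1] = 0x34, buf[2] = 0x56,
 * and regmap_parse_24() below reassembles exactly that value.
 */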
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif
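/*
 * The hwspinlock-based lock/unlock callbacks below are selected by
 * __regmap_init() when the config asks for a hardware spinlock. A
 * minimal sketch of such a config (the hwlock id and the device it is
 * shared with are hypothetical):
 */
#if 0	/* illustrative only, not built */
static const struct regmap_config foo_shared_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.use_hwlock = true,
	.hwlock_id = 0,			/* hwspinlock shared with a coprocessor */
	.hwlock_mode = HWLOCK_IRQSTATE,	/* save/restore IRQ state while held */
};
#endif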
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
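/*
 * Endianness resolution order, as implemented above: the regmap config
 * is consulted first, then (for values) the "big-endian"/
 * "little-endian"/"native-endian" DT properties, then the bus default,
 * then big endian. A hedged sketch of a config-level override for a
 * hypothetical little-endian device on a big-endian-default bus:
 */
#if 0	/* illustrative only, not built */
static const struct regmap_config foo_le_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
#endif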
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
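/*
 * Drivers normally reach __regmap_init()/__devm_regmap_init() through a
 * bus-specific wrapper such as devm_regmap_init_i2c(). A minimal usage
 * sketch (the "foo" device and its probe function are hypothetical):
 */
#if 0	/* illustrative only, not built */
static const struct regmap_config foo_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,
	.cache_type = REGCACHE_RBTREE,
};

static int foo_i2c_probe(struct i2c_client *i2c)
{
	struct regmap *map = devm_regmap_init_i2c(i2c, &foo_config);

	if (IS_ERR(map))
		return PTR_ERR(map);
	/* map is freed automatically when the device is unbound */
	return 0;
}
#endif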
static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
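/*
 * Usage sketch for the field API above (the register, bit positions,
 * and names are hypothetical): REG_FIELD() describes bits 7:4 of
 * register 0x20, and regmap_field_write() then performs the masked
 * update.
 */
#if 0	/* illustrative only, not built */
static int foo_set_gain(struct device *dev, struct regmap *map)
{
	struct reg_field gain_field = REG_FIELD(0x20, 4, 7);
	struct regmap_field *gain;

	gain = devm_regmap_field_alloc(dev, map, gain_field);
	if (IS_ERR(gain))
		return PTR_ERR(gain);

	/* only bits 7:4 of register 0x20 are touched */
	return regmap_field_write(gain, 0x5);
}
#endif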
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
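/*
 * dev_get_regmap() is typically used by the child of an MFD to borrow
 * the parent's register map. A hedged sketch (the parent/child split
 * and names are hypothetical):
 */
#if 0	/* illustrative only, not built */
static int foo_child_probe(struct platform_device *pdev)
{
	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);

	if (!map)
		return -ENODEV;
	/* use map for I/O; the parent owns its lifetime */
	return 0;
}
#endif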
/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	   window. In that case, the selector register is located on every
	   page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	for (i = 0; i < val_len / map->format.val_bytes; i++)
		if (!regmap_writeable(map,
				      reg + regmap_get_offset(map, i)))
			return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
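/*
 * A minimal write sequence using the synchronous and asynchronous
 * single-register writers above (the register addresses and values are
 * hypothetical):
 */
#if 0	/* illustrative only, not built */
static int foo_start(struct regmap *map)
{
	int ret;

	ret = regmap_write(map, 0x00, 0x1234);	/* blocks until issued */
	if (ret)
		return ret;

	/* queue without waiting; pair with regmap_async_complete() */
	return regmap_write_async(map, 0x04, 0x5678);
}
#endif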
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_noinc_write(): Write data to a register without incrementing the
 *                       register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
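/*
 * regmap_noinc_write() usage sketch for a hypothetical TX FIFO
 * register (FOO_REG_TX_FIFO and the frame contents are made up): every
 * byte lands in the same volatile register rather than in consecutive
 * addresses.
 */
#if 0	/* illustrative only, not built */
#define FOO_REG_TX_FIFO	0x40	/* hypothetical FIFO register */

static int foo_send_frame(struct regmap *map, const u8 *frame, size_t len)
{
	return regmap_noinc_write(map, FOO_REG_TX_FIFO, frame, len);
}
#endif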
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be done asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with the change,
 * async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be done asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
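/*
 * The field helpers above reduce to regmap_update_bits_base() on the
 * underlying register. An equivalent open-coded sketch for a
 * hypothetical 2-bit field at bits 5:4 of register 0x30:
 */
#if 0	/* illustrative only, not built */
static int foo_set_mode(struct regmap *map)
{
	/* set the hypothetical field at bits 5:4 of register 0x30 to 2 */
	return regmap_update_bits(map, 0x30, GENMASK(5, 4), 2 << 4);
}
#endif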
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
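/*
 * regmap_bulk_write() usage sketch: val is supplied in the device's
 * native register size (u16 here, for a 16-bit device); the core
 * handles endian conversion and any required chunking. The register
 * base and the coefficient values are hypothetical.
 */
#if 0	/* illustrative only, not built */
static int foo_load_coeffs(struct regmap *map)
{
	static const u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };

	return regmap_bulk_write(map, 0x50, coeffs, ARRAY_SIZE(coeffs));
}
#endif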
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, value) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since the
	 * order of writes must be preserved this algorithm chops the set
	 * each time the page changes. This also applies if there is a
	 * delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay, make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
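
/*
 * Worked example of the page chopping in
 * _regmap_range_multi_paged_reg_write() above (illustrative only): with
 * a window of four registers and the sequence
 *
 *	{ 0x01, a }, { 0x02, b }, { 0x05, c }
 *
 * 0x05 falls in the next window, so the first two entries go out as one
 * raw multi-write, the page register is then updated for 0x05, and
 * { 0x05, c } goes out as a second raw multi-write, preserving order.
 */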
/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
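
/*
 * Usage sketch (illustrative only, fenced out of the build): an ordered
 * init sequence with a settling delay after the PLL write, applied via
 * regmap_multi_reg_write(). Register addresses and values are made up
 * for the example.
 */
#if 0
static const struct reg_sequence foo_init_seq[] = {
	{ 0x00, 0x01 },				/* take block out of reset */
	{ 0x01, 0x40, .delay_us = 100 },	/* enable PLL, let it lock */
	{ 0x02, 0x1f },				/* unmute outputs */
};

static int foo_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, foo_init_seq,
				      ARRAY_SIZE(foo_init_seq));
}
#endif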
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
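
/*
 * Usage sketch (illustrative only, fenced out of the build): streaming a
 * firmware image with an asynchronous raw write, then waiting for
 * completion. The fw buffer must stay valid until
 * regmap_async_complete() returns; FOO_DSP_BASE is an assumption.
 */
#if 0
static int foo_download_fw(struct regmap *map, const void *fw, size_t len)
{
	int ret;

	ret = regmap_raw_write_async(map, FOO_DSP_BASE, fw, len);
	if (ret)
		return ret;

	/* Collect the result of all scheduled writes */
	return regmap_async_complete(map);
}
#endif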
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read data
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
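
/*
 * Usage sketch (illustrative only, fenced out of the build): polling a
 * status register with regmap_read(). FOO_STATUS and FOO_STATUS_READY
 * are assumptions for the example.
 */
#if 0
static int foo_check_ready(struct regmap *map)
{
	unsigned int status;
	int ret;

	ret = regmap_read(map, FOO_STATUS, &status);
	if (ret)
		return ret;

	return (status & FOO_STATUS_READY) ? 0 : -EBUSY;
}
#endif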
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *                       register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
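
/*
 * Usage sketch (illustrative only, fenced out of the build): draining a
 * device FIFO exposed through a single register with
 * regmap_noinc_read(). FOO_FIFO is an assumption; the register must be
 * marked volatile and noinc-readable in the regmap config.
 */
#if 0
static int foo_read_fifo(struct regmap *map, u8 *buf, size_t len)
{
	return regmap_noinc_read(map, FOO_FIFO, buf, len);
}
#endif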
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read values, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
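
/*
 * Usage sketch (illustrative only, fenced out of the build):
 * snapshotting a block of 16-bit measurement registers with
 * regmap_bulk_read(); values land in the buffer in native register
 * size. FOO_MEAS_BASE is an assumption for the example.
 */
#if 0
static int foo_read_measurements(struct regmap *map, u16 *meas, size_t n)
{
	return regmap_bulk_read(map, FOO_MEAS_BASE, meas, n);
}
#endif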
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
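
/*
 * Usage sketch (illustrative only, fenced out of the build): toggling a
 * single enable bit with regmap_update_bits_base() and reporting whether
 * a write actually happened. FOO_CTRL and FOO_CTRL_EN are assumptions.
 */
#if 0
static int foo_enable(struct regmap *map, bool enable, bool *changed)
{
	return regmap_update_bits_base(map, FOO_CTRL, FOO_CTRL_EN,
				       enable ? FOO_CTRL_EN : 0,
				       changed, false, false);
}
#endif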
/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
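
/*
 * Usage sketch for regmap_register_patch() above (illustrative only,
 * fenced out of the build): registering a vendor-supplied errata patch
 * at probe time; the same writes are replayed automatically whenever
 * the cache is synchronised. Addresses and values are made up.
 */
#if 0
static const struct reg_sequence foo_errata[] = {
	{ 0x7f, 0x01 },		/* undocumented vendor-recommended write */
	{ 0x80, 0x3c },
};

static int foo_apply_errata(struct regmap *map)
{
	return regmap_register_patch(map, foo_errata,
				     ARRAY_SIZE(foo_errata));
}
#endif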