// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
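
/*
 * Illustrative example (not part of the original source): a driver can
 * describe writeable registers either with a callback or with an access
 * table that the helpers above evaluate. A minimal, hypothetical setup
 * might look like this:
 *
 *	static const struct regmap_range foo_wr_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_yes_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
 *	};
 *
 * With .wr_table = &foo_wr_table in the regmap_config,
 * regmap_writeable() returns true only for registers 0x00..0x3f.
 * The foo_* names are made up for the example.
 */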

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];

	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif
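
/*
 * Illustrative note (not part of the original source): format_val() and
 * parse_val() are inverses over the wire representation. For a 16-bit
 * big-endian value, regmap_format_16_be(buf, 0x1234, 0) stores the
 * bytes { 0x12, 0x34 }, and regmap_parse_16_be() on the same buffer
 * returns 0x1234 again; the *_inplace() variants convert a buffer to
 * CPU endianness without a separate destination.
 */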

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;

	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;

	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;

	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}
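
/*
 * Illustrative example (not part of the original source): a driver that
 * shares its bus lock with other users can pass its own callbacks in
 * regmap_config instead of the mutex/spinlock set up above. A sketch,
 * assuming hypothetical foo_bus_lock()/foo_bus_unlock() helpers:
 *
 *	static void foo_regmap_lock(void *arg)
 *	{
 *		struct foo *foo = arg;
 *
 *		foo_bus_lock(foo);
 *	}
 *
 *	static void foo_regmap_unlock(void *arg)
 *	{
 *		struct foo *foo = arg;
 *
 *		foo_bus_unlock(foo);
 *	}
 *
 * with .lock = foo_regmap_lock, .unlock = foo_regmap_unlock and
 * .lock_arg = foo in the config.
 */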

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
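
/*
 * Illustrative example (not part of the original source): the DT lookup
 * above means a node such as
 *
 *	codec@1a {
 *		compatible = "example,codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * makes regmap_get_val_endian() return REGMAP_ENDIAN_LITTLE even when
 * the bus default is big-endian; "example,codec" is a made-up
 * compatible string.
 */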

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
					    config->val_bits +
					    config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}
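
	/*
	 * Illustrative note (not part of the original source): on a
	 * typical SPI device read_flag_mask is something like 0x80, so
	 * that reading register 0x05 puts 0x85 on the wire while writes
	 * send the bare 0x05. zero_flag_mask only forces the (all-zero)
	 * masks above to be taken from the config rather than from the
	 * bus defaults.
	 */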

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/*
		 * Make sure that this register range has no selector
		 * or data window within its boundary.
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;
1098 "Range %d: selector for %d in window\n", 1099 i, j); 1100 goto err_range; 1101 } 1102 1103 if (!(win_max < range_cfg->range_min || 1104 win_min > range_cfg->range_max)) { 1105 dev_err(map->dev, 1106 "Range %d: window for %d in window\n", 1107 i, j); 1108 goto err_range; 1109 } 1110 } 1111 1112 new = kzalloc(sizeof(*new), GFP_KERNEL); 1113 if (new == NULL) { 1114 ret = -ENOMEM; 1115 goto err_range; 1116 } 1117 1118 new->map = map; 1119 new->name = range_cfg->name; 1120 new->range_min = range_cfg->range_min; 1121 new->range_max = range_cfg->range_max; 1122 new->selector_reg = range_cfg->selector_reg; 1123 new->selector_mask = range_cfg->selector_mask; 1124 new->selector_shift = range_cfg->selector_shift; 1125 new->window_start = range_cfg->window_start; 1126 new->window_len = range_cfg->window_len; 1127 1128 if (!_regmap_range_add(map, new)) { 1129 dev_err(map->dev, "Failed to add range %d\n", i); 1130 kfree(new); 1131 goto err_range; 1132 } 1133 1134 if (map->selector_work_buf == NULL) { 1135 map->selector_work_buf = 1136 kzalloc(map->format.buf_size, GFP_KERNEL); 1137 if (map->selector_work_buf == NULL) { 1138 ret = -ENOMEM; 1139 goto err_range; 1140 } 1141 } 1142 } 1143 1144 ret = regcache_init(map, config); 1145 if (ret != 0) 1146 goto err_range; 1147 1148 if (dev) { 1149 ret = regmap_attach_dev(dev, map, config); 1150 if (ret != 0) 1151 goto err_regcache; 1152 } else { 1153 regmap_debugfs_init(map, config->name); 1154 } 1155 1156 return map; 1157 1158 err_regcache: 1159 regcache_exit(map); 1160 err_range: 1161 regmap_range_exit(map); 1162 kfree(map->work_buf); 1163 err_hwlock: 1164 if (map->hwlock) 1165 hwspin_lock_free(map->hwlock); 1166 err_name: 1167 kfree_const(map->name); 1168 err_map: 1169 kfree(map); 1170 err: 1171 return ERR_PTR(ret); 1172 } 1173 EXPORT_SYMBOL_GPL(__regmap_init); 1174 1175 static void devm_regmap_release(struct device *dev, void *res) 1176 { 1177 regmap_exit(*(struct regmap **)res); 1178 } 1179 1180 struct regmap *__devm_regmap_init(struct device *dev, 1181 const struct regmap_bus *bus, 1182 void *bus_context, 1183 const struct regmap_config *config, 1184 struct lock_class_key *lock_key, 1185 const char *lock_name) 1186 { 1187 struct regmap **ptr, *regmap; 1188 1189 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); 1190 if (!ptr) 1191 return ERR_PTR(-ENOMEM); 1192 1193 regmap = __regmap_init(dev, bus, bus_context, config, 1194 lock_key, lock_name); 1195 if (!IS_ERR(regmap)) { 1196 *ptr = regmap; 1197 devres_add(dev, ptr); 1198 } else { 1199 devres_free(ptr); 1200 } 1201 1202 return regmap; 1203 } 1204 EXPORT_SYMBOL_GPL(__devm_regmap_init); 1205 1206 static void regmap_field_init(struct regmap_field *rm_field, 1207 struct regmap *regmap, struct reg_field reg_field) 1208 { 1209 rm_field->regmap = regmap; 1210 rm_field->reg = reg_field.reg; 1211 rm_field->shift = reg_field.lsb; 1212 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); 1213 rm_field->id_size = reg_field.id_size; 1214 rm_field->id_offset = reg_field.id_offset; 1215 } 1216 1217 /** 1218 * devm_regmap_field_alloc() - Allocate and initialise a register field. 1219 * 1220 * @dev: Device that will be interacted with 1221 * @regmap: regmap bank in which this register field is located. 1222 * @reg_field: Register field with in the bank. 1223 * 1224 * The return value will be an ERR_PTR() on error or a valid pointer 1225 * to a struct regmap_field. The regmap_field will be automatically freed 1226 * by the device management code. 

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it is finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
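
/*
 * Illustrative example (not part of the original source): reg_field
 * descriptions are usually built with the REG_FIELD() macro from
 * <linux/regmap.h>. A hypothetical enable bit at bit 3 of register
 * 0x10:
 *
 *	static const struct reg_field foo_enable = REG_FIELD(0x10, 3, 3);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_enable);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 * foo_enable is a made-up name.
 */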

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
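
/*
 * Illustrative example (not part of the original source): a common use
 * is an MFD child driver picking up the regmap its parent registered:
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */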

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have a selector register inside the data
	 * window. In that case, the selector register is located on
	 * every page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
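
/*
 * Illustrative note (not part of the original source): with a range
 * configured as range_min = 0x100, window_start = 0x10 and
 * window_len = 8, an access to virtual register 0x11a resolves to
 * win_page = (0x11a - 0x100) / 8 = 3 and win_offset = 2, so page 3 is
 * written through the selector field and the access is redirected to
 * physical register 0x10 + 2 = 0x12. The numbers are made up for the
 * example.
 */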

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;

		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand.
	 */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
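
/*
 * Illustrative example (not part of the original source): the typical
 * single-register pattern from driver code:
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, 0x01);
 *	if (ret)
 *		dev_err(dev, "failed to write CTRL: %d\n", ret);
 *
 * FOO_REG_CTRL is a made-up register name.
 */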

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *	 device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 * register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data pointed to by val, in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
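
/*
 * Illustrative example (not part of the original source): streaming a
 * buffer into a FIFO register, assuming FOO_REG_FIFO was marked both
 * volatile and writeable_noinc in the regmap_config:
 *
 *	ret = regmap_noinc_write(map, FOO_REG_FIFO, buf, len);
 *
 * FOO_REG_FIFO is a made-up register name.
 */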

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 * register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force option.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 * register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
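
/*
 * Illustrative example (not part of the original source): the
 * regmap_field_write()/regmap_field_update_bits() helpers from
 * <linux/regmap.h> wrap the function above, so setting the hypothetical
 * enable field from the earlier example is simply:
 *
 *	ret = regmap_field_write(field, 1);
 */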
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but
	 * since the order of writes must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
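	 * The registers accumulated so far are flushed with
	 * _regmap_raw_multi_reg_write() (and any requested delay is
	 * applied) before the window page register is reprogrammed.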
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;

			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;

			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);

			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
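 *
 * A minimal usage sketch (illustrative only; the addresses and values
 * are hypothetical, and the third field of the second entry requests a
 * 50 microsecond delay after that write):
 *
 *	static const struct reg_sequence init_seq[] = {
 *		{ 0x10, 0x01 },
 *		{ 0x11, 0x80, 50 },
 *		{ 0x20, 0x55 },
 *	};
 *	int ret;
 *
 *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));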
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
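 *
 * A minimal usage sketch (illustrative only; FW_DATA_REG, fw_buf and
 * fw_len are hypothetical, and fw_buf must remain valid until
 * regmap_async_complete() returns):
 *
 *	ret = regmap_raw_write_async(map, FW_DATA_REG, fw_buf, fw_len);
 *	if (ret)
 *		return ret;
 *
 *	return regmap_async_complete(map);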
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
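 *
 * A minimal usage sketch (illustrative only; CHIP_ID_REG is a
 * hypothetical register address):
 *
 *	unsigned int id;
 *	int ret;
 *
 *	ret = regmap_read(map, CHIP_ID_REG, &id);
 *	if (ret)
 *		return ret;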
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
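 *
 * A minimal usage sketch (illustrative only; MY_CHIP_RX_FIFO is a
 * hypothetical FIFO register that must be marked volatile and
 * readable-noinc in the map's configuration):
 *
 *	u8 buf[16];
 *	int ret;
 *
 *	ret = regmap_noinc_read(map, MY_CHIP_RX_FIFO, buf, sizeof(buf));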
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with
 * port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
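 *
 * A minimal usage sketch (illustrative only; assumes a map with 16-bit
 * values and a hypothetical ADC_RESULT_BASE register):
 *
 *	u16 results[4];
 *	int ret;
 *
 *	ret = regmap_bulk_read(map, ADC_RESULT_BASE, results,
 *			       ARRAY_SIZE(results));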
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the update asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
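 *
 * A minimal usage sketch (illustrative only; CTRL_REG and CTRL_ENABLE
 * are hypothetical definitions). This sets the enable bit without
 * touching the other bits, and skips the bus write if the bit is
 * already set:
 *
 *	ret = regmap_update_bits_base(map, CTRL_REG, CTRL_ENABLE,
 *				      CTRL_ENABLE, NULL, false, false);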
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all
 * tested bits are set, or a negative errno if the underlying
 * regmap_read() fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
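 *
 * A minimal usage sketch (illustrative only; the register values stand
 * in for vendor-supplied fixups):
 *
 *	static const struct reg_sequence chip_errata[] = {
 *		{ 0x71, 0x04 },
 *		{ 0x72, 0xd0 },
 *	};
 *	int ret;
 *
 *	ret = regmap_register_patch(map, chip_errata,
 *				    ARRAY_SIZE(chip_errata));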
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);