// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes, for failures during very early init, the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

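	/*
	 * Note: a UINT_MAX ms timeout effectively means "wait forever"
	 * for the hardware spinlock; the return value is ignored here.
	 */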
	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
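		/*
		 * Select the hwspinlock lock/unlock helpers matching the
		 * IRQ protection mode requested by the config.
		 */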
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/*
		 * Make sure that this register range has no selector
		 * or data window within its boundary.
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
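 *
 * A minimal usage sketch; the REG_FIELD() layout below is illustrative
 * rather than taken from any real device:
 *
 *	static const struct reg_field example_field = REG_FIELD(0x10, 4, 7);
 *	struct regmap_field *field;
 *
 *	field = devm_regmap_field_alloc(dev, regmap, example_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);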
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here; the user needs to ensure that
 * this function will not race with other calls to regmap.
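 *
 * A minimal sketch of the intended call pattern, assuming the caller
 * has already quiesced all other users of the map ("new_config" is an
 * illustrative name, not part of the API):
 *
 *	ret = regmap_reinit_cache(map, &new_config);
 *	if (ret)
 *		return ret;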
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device; if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare, so
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
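 *
 * For example (illustrative only), code that was handed just a regmap
 * can still log against the owning device:
 *
 *	dev_dbg(regmap_get_device(map), "shared regmap in use\n");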
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have the selector register inside the data
	 * window. In that case, the selector register is located on every
	 * page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
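			/*
			 * Write only the part that fits in the current
			 * window, then advance reg/val and recompute the
			 * residue for the next window.
			 */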
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers, optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
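	/*
	 * The fallback buffer mirrors the gather layout:
	 * [formatted register][padding][values], sent as one bus write.
	 */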
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes a lock that we already hold,
		 * so call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many whole chunks of chunk_bytes as possible */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
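 *           This must be a multiple of the register value size or
 *           -EINVAL will be returned.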
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 *                        register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the update should be done asynchronously
 * @force: Boolean indicating if the write should be forced even when the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
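 *
 * A minimal, illustrative call; mask and value are expressed relative
 * to the field, not to the parent register:
 *
 *	ret = regmap_field_update_bits_base(field, 0x3, 0x1,
 *					    NULL, false, false);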
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the update should be done asynchronously
 * @force: Boolean indicating if the write should be forced even when the
 *         value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we use a series
	 * of single write operations.
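	 * The same fallback is used when the format lacks a parse_inplace()
	 * helper, since the raw path needs values laid out in bus order.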
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                      size_t val_count)
{
        int ret = 0, i;
        size_t val_bytes = map->format.val_bytes;

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        /*
         * Some devices don't support bulk write, for them we have a series of
         * single write operations.
         */
        if (!map->bus || !map->format.parse_inplace) {
                map->lock(map->lock_arg);
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;

                        switch (val_bytes) {
                        case 1:
                                ival = *(u8 *)(val + (i * val_bytes));
                                break;
                        case 2:
                                ival = *(u16 *)(val + (i * val_bytes));
                                break;
                        case 4:
                                ival = *(u32 *)(val + (i * val_bytes));
                                break;
#ifdef CONFIG_64BIT
                        case 8:
                                ival = *(u64 *)(val + (i * val_bytes));
                                break;
#endif
                        default:
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = _regmap_write(map,
                                            reg + regmap_get_offset(map, i),
                                            ival);
                        if (ret != 0)
                                goto out;
                }
out:
                map->unlock(map->lock_arg);
        } else {
                void *wval;

                wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
                if (!wval)
                        return -ENOMEM;

                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(wval + i);

                ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

                kfree(wval);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
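
/*
 * Usage sketch (illustrative; the base register and values are
 * hypothetical): writing four consecutive 16-bit registers with one
 * call, with vals held in CPU-native format and converted for the bus
 * by the parse_inplace/raw path above:
 *
 *      u16 vals[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *      int ret;
 *
 *      ret = regmap_bulk_write(map, MY_CHIP_COEF_BASE, vals,
 *                              ARRAY_SIZE(vals));
 */
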
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
                                       const struct reg_sequence *regs,
                                       size_t num_regs)
{
        int ret;
        void *buf;
        int i;
        u8 *u8;
        size_t val_bytes = map->format.val_bytes;
        size_t reg_bytes = map->format.reg_bytes;
        size_t pad_bytes = map->format.pad_bytes;
        size_t pair_size = reg_bytes + pad_bytes + val_bytes;
        size_t len = pair_size * num_regs;

        if (!len)
                return -EINVAL;

        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* We have to linearise by hand. */

        u8 = buf;

        for (i = 0; i < num_regs; i++) {
                unsigned int reg = regs[i].reg;
                unsigned int val = regs[i].def;
                trace_regmap_hw_write_start(map, reg, 1);
                map->format.format_reg(u8, reg, map->reg_shift);
                u8 += reg_bytes + pad_bytes;
                map->format.format_val(u8, val, 0);
                u8 += val_bytes;
        }
        u8 = buf;
        *u8 |= map->write_flag_mask;

        ret = map->bus->write(map->bus_context, buf, len);

        kfree(buf);

        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
                trace_regmap_hw_write_done(map, reg, 1);
        }
        return ret;
}
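
/*
 * For reference, with one register byte, no padding and one value byte
 * the buffer linearised above is laid out on the wire as:
 *
 *      [ R1 ] [ V1 ] [ R2 ] [ V2 ] ... [ Rn ] [ Vn ]
 *
 * with the first byte OR'd with the map's write flag mask.
 */
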
static unsigned int _regmap_register_page(struct regmap *map,
                                          unsigned int reg,
                                          struct regmap_range_node *range)
{
        unsigned int win_page = (reg - range->range_min) / range->window_len;

        return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
                                               struct reg_sequence *regs,
                                               size_t num_regs)
{
        int ret;
        int i, n;
        struct reg_sequence *base;
        unsigned int this_page = 0;
        unsigned int page_change = 0;
        /*
         * The set of registers is not necessarily in order, but since
         * the order of writes must be preserved this algorithm chops
         * the set each time the page changes. This also applies if
         * there is a delay required at any point in the sequence.
         */
        base = regs;
        for (i = 0, n = 0; i < num_regs; i++, n++) {
                unsigned int reg = regs[i].reg;
                struct regmap_range_node *range;

                range = _regmap_range_lookup(map, reg);
                if (range) {
                        unsigned int win_page = _regmap_register_page(map, reg,
                                                                      range);

                        if (i == 0)
                                this_page = win_page;
                        if (win_page != this_page) {
                                this_page = win_page;
                                page_change = 1;
                        }
                }

                /* If we have both a page change and a delay make sure to
                 * write the regs and apply the delay before we change the
                 * page.
                 */

                if (page_change || regs[i].delay_us) {

                        /* For situations where the first write requires
                         * a delay we need to make sure we don't call
                         * raw_multi_reg_write with n=0.
                         * This can't occur with page breaks as we
                         * never write on the first iteration.
                         */
                        if (regs[i].delay_us && i == 0)
                                n = 1;

                        ret = _regmap_raw_multi_reg_write(map, base, n);
                        if (ret != 0)
                                return ret;

                        if (regs[i].delay_us)
                                udelay(regs[i].delay_us);

                        base += n;
                        n = 0;

                        if (page_change) {
                                ret = _regmap_select_page(map,
                                                          &base[n].reg,
                                                          range, 1);
                                if (ret != 0)
                                        return ret;

                                page_change = 0;
                        }

                }

        }
        if (n > 0)
                return _regmap_raw_multi_reg_write(map, base, n);
        return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
                                   const struct reg_sequence *regs,
                                   size_t num_regs)
{
        int i;
        int ret;

        if (!map->can_multi_write) {
                for (i = 0; i < num_regs; i++) {
                        ret = _regmap_write(map, regs[i].reg, regs[i].def);
                        if (ret != 0)
                                return ret;

                        if (regs[i].delay_us)
                                udelay(regs[i].delay_us);
                }
                return 0;
        }

        if (!map->format.parse_inplace)
                return -EINVAL;

        if (map->writeable_reg)
                for (i = 0; i < num_regs; i++) {
                        int reg = regs[i].reg;
                        if (!map->writeable_reg(map->dev, reg))
                                return -EINVAL;
                        if (!IS_ALIGNED(reg, map->reg_stride))
                                return -EINVAL;
                }

        if (!map->cache_bypass) {
                for (i = 0; i < num_regs; i++) {
                        unsigned int val = regs[i].def;
                        unsigned int reg = regs[i].reg;
                        ret = regcache_write(map, reg, val);
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
                                        reg, ret);
                                return ret;
                        }
                }
                if (map->cache_only) {
                        map->cache_dirty = true;
                        return 0;
                }
        }

        WARN_ON(!map->bus);

        for (i = 0; i < num_regs; i++) {
                unsigned int reg = regs[i].reg;
                struct regmap_range_node *range;

                /* Coalesce all the writes between a page break or a delay
                 * in a sequence
                 */
                range = _regmap_range_lookup(map, reg);
                if (range || regs[i].delay_us) {
                        size_t len = sizeof(struct reg_sequence) * num_regs;
                        struct reg_sequence *base = kmemdup(regs, len,
                                                            GFP_KERNEL);
                        if (!base)
                                return -ENOMEM;
                        ret = _regmap_range_multi_paged_reg_write(map, base,
                                                                  num_regs);
                        kfree(base);

                        return ret;
                }
        }
        return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
                           int num_regs)
{
        int ret;

        map->lock(map->lock_arg);

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 * device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
                                    const struct reg_sequence *regs,
                                    int num_regs)
{
        int ret;
        bool bypass;

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;
        map->cache_bypass = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
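
/*
 * Usage sketch (illustrative; registers, values and the delay are
 * hypothetical): a scattered write sequence with a settle delay after
 * the second write, suitable for either of the two entry points above:
 *
 *      static const struct reg_sequence my_init_seq[] = {
 *              { .reg = 0x02, .def = 0x01 },
 *              { .reg = 0x10, .def = 0x80, .delay_us = 100 },
 *              { .reg = 0x05, .def = 0x00 },
 *      };
 *
 *      ret = regmap_multi_reg_write(map, my_init_seq,
 *                                   ARRAY_SIZE(my_init_seq));
 */
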
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 * asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len)
{
        int ret;

        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_raw_write(map, reg, val, val_len);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
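
/*
 * Usage sketch (illustrative; MY_CHIP_DSP_BASE is hypothetical and fw
 * is assumed to come from request_firmware(), so fw->data stays valid
 * until completion): queue an asynchronous raw write, then wait for all
 * outstanding async I/O with regmap_async_complete():
 *
 *      ret = regmap_raw_write_async(map, MY_CHIP_DSP_BASE,
 *                                   fw->data, fw->size);
 *      if (ret)
 *              return ret;
 *      ret = regmap_async_complete(map);
 */
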
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len)
{
        struct regmap_range_node *range;
        int ret;

        WARN_ON(!map->bus);

        if (!map->bus || !map->bus->read)
                return -EINVAL;

        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range,
                                          val_len / map->format.val_bytes);
                if (ret != 0)
                        return ret;
        }

        map->format.format_reg(map->work_buf, reg, map->reg_shift);
        regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
                                      map->read_flag_mask);
        trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

        ret = map->bus->read(map->bus_context, map->work_buf,
                             map->format.reg_bytes + map->format.pad_bytes,
                             val, val_len);

        trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

        return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val)
{
        struct regmap *map = context;

        return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val)
{
        int ret;
        struct regmap *map = context;
        void *work_val = map->work_buf + map->format.reg_bytes +
                map->format.pad_bytes;

        if (!map->format.parse_val)
                return -EINVAL;

        ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
        if (ret == 0)
                *val = map->format.parse_val(work_val);

        return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
                        unsigned int *val)
{
        int ret;
        void *context = _regmap_map_get_context(map);

        if (!map->cache_bypass) {
                ret = regcache_read(map, reg, val);
                if (ret == 0)
                        return 0;
        }

        if (map->cache_only)
                return -EBUSY;

        if (!regmap_readable(map, reg))
                return -EIO;

        ret = map->reg_read(context, reg, val);
        if (ret == 0) {
                if (regmap_should_log(map))
                        dev_info(map->dev, "%x => %x\n", reg, *val);

                trace_regmap_reg_read(map, reg, *val);

                if (!map->cache_bypass)
                        regcache_write(map, reg, *val);
        }

        return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
        int ret;

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_read(map, reg, val);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                    size_t val_len)
{
        size_t val_bytes = map->format.val_bytes;
        size_t val_count = val_len / val_bytes;
        unsigned int v;
        int ret, i;

        if (!map->bus)
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
        if (val_count == 0)
                return -EINVAL;

        map->lock(map->lock_arg);

        if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
            map->cache_type == REGCACHE_NONE) {
                size_t chunk_count, chunk_bytes;
                size_t chunk_regs = val_count;

                if (!map->bus->read) {
                        ret = -ENOTSUPP;
                        goto out;
                }

                if (map->use_single_read)
                        chunk_regs = 1;
                else if (map->max_raw_read && val_len > map->max_raw_read)
                        chunk_regs = map->max_raw_read / val_bytes;

                chunk_count = val_count / chunk_regs;
                chunk_bytes = chunk_regs * val_bytes;

                /* Read bytes that fit into whole chunks */
                for (i = 0; i < chunk_count; i++) {
                        ret = _regmap_raw_read(map, reg, val, chunk_bytes);
                        if (ret != 0)
                                goto out;

                        reg += regmap_get_offset(map, chunk_regs);
                        val += chunk_bytes;
                        val_len -= chunk_bytes;
                }

                /* Read remaining bytes */
                if (val_len) {
                        ret = _regmap_raw_read(map, reg, val, val_len);
                        if (ret != 0)
                                goto out;
                }
        } else {
                /* Otherwise go word by word for the cache; should be low
                 * cost as we expect to hit the cache.
                 */
                for (i = 0; i < val_count; i++) {
                        ret = _regmap_read(map,
                                           reg + regmap_get_offset(map, i),
                                           &v);
                        if (ret != 0)
                                goto out;

                        map->format.format_val(val + (i * val_bytes), v, 0);
                }
        }

out:
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
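
/*
 * Usage sketch (illustrative; the register names are hypothetical and
 * the device is assumed to transmit 16-bit big-endian values): a single
 * register read, then a raw read that returns data exactly as it
 * appears on the bus:
 *
 *      unsigned int status;
 *      __be16 raw[4];
 *
 *      ret = regmap_read(map, MY_CHIP_STATUS_REG, &status);
 *      if (!ret)
 *              ret = regmap_raw_read(map, MY_CHIP_DATA_BASE, raw,
 *                                    sizeof(raw));
 */
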
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
                      void *val, size_t val_len)
{
        size_t read_len;
        int ret;

        if (!map->bus)
                return -EINVAL;
        if (!map->bus->read)
                return -ENOTSUPP;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
        if (val_len == 0)
                return -EINVAL;

        map->lock(map->lock_arg);

        if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
                ret = -EINVAL;
                goto out_unlock;
        }

        while (val_len) {
                if (map->max_raw_read && map->max_raw_read < val_len)
                        read_len = map->max_raw_read;
                else
                        read_len = val_len;
                ret = _regmap_raw_read(map, reg, val, read_len);
                if (ret)
                        goto out_unlock;
                val = ((u8 *)val) + read_len;
                val_len -= read_len;
        }

out_unlock:
        map->unlock(map->lock_arg);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
        int ret;
        unsigned int reg_val;

        ret = regmap_read(field->regmap, field->reg, &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with
 * port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
                       unsigned int *val)
{
        int ret;
        unsigned int reg_val;

        if (id >= field->id_size)
                return -EINVAL;

        ret = regmap_read(field->regmap,
                          field->reg + (field->id_offset * id),
                          &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
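
/*
 * Usage sketch (illustrative; the field layout is hypothetical): read
 * back a two-bit field, receiving the value already masked and shifted
 * down by the helpers above:
 *
 *      static const struct reg_field my_state = REG_FIELD(0x20, 4, 5);
 *      unsigned int state;
 *
 *      field = devm_regmap_field_alloc(dev, map, my_state);
 *      if (IS_ERR(field))
 *              return PTR_ERR(field);
 *      ret = regmap_field_read(field, &state);
 */
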
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
                     size_t val_count)
{
        int ret, i;
        size_t val_bytes = map->format.val_bytes;
        bool vol = regmap_volatile_range(map, reg, val_count);

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
        if (val_count == 0)
                return -EINVAL;

        if (map->bus && map->format.parse_inplace &&
            (vol || map->cache_type == REGCACHE_NONE)) {
                ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
                if (ret != 0)
                        return ret;

                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(val + i);
        } else {
#ifdef CONFIG_64BIT
                u64 *u64 = val;
#endif
                u32 *u32 = val;
                u16 *u16 = val;
                u8 *u8 = val;

                map->lock(map->lock_arg);

                for (i = 0; i < val_count; i++) {
                        unsigned int ival;

                        ret = _regmap_read(map,
                                           reg + regmap_get_offset(map, i),
                                           &ival);
                        if (ret != 0)
                                goto out;

                        switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
                        case 8:
                                u64[i] = ival;
                                break;
#endif
                        case 4:
                                u32[i] = ival;
                                break;
                        case 2:
                                u16[i] = ival;
                                break;
                        case 1:
                                u8[i] = ival;
                                break;
                        default:
                                ret = -EINVAL;
                                goto out;
                        }
                }

out:
                map->unlock(map->lock_arg);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
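
/*
 * Usage sketch (illustrative; names are hypothetical): read eight
 * consecutive registers into a CPU-native buffer, one array element per
 * register whatever the bus-level value format:
 *
 *      u16 meas[8];
 *
 *      ret = regmap_bulk_read(map, MY_CHIP_MEAS_BASE, meas,
 *                             ARRAY_SIZE(meas));
 */
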
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change, bool force_write)
{
        int ret;
        unsigned int tmp, orig;

        if (change)
                *change = false;

        if (regmap_volatile(map, reg) && map->reg_update_bits) {
                ret = map->reg_update_bits(map->bus_context, reg, mask, val);
                if (ret == 0 && change)
                        *change = true;
        } else {
                ret = _regmap_read(map, reg, &orig);
                if (ret != 0)
                        return ret;

                tmp = orig & ~mask;
                tmp |= val & mask;

                if (force_write || (tmp != orig)) {
                        ret = _regmap_write(map, reg, tmp);
                        if (ret == 0 && change)
                                *change = true;
                }
        }

        return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async,
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
                            unsigned int mask, unsigned int val,
                            bool *change, bool async, bool force)
{
        int ret;

        map->lock(map->lock_arg);

        map->async = async;

        ret = _regmap_update_bits(map, reg, mask, val, change, force);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
        unsigned int val;
        int ret;

        ret = regmap_read(map, reg, &val);
        if (ret)
                return ret;

        return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
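
/*
 * Usage sketch (illustrative; register and mask are hypothetical): the
 * regmap_update_bits() wrapper funnels into regmap_update_bits_base()
 * above, and regmap_test_bits() can then verify the result, returning
 * 1 when every tested bit is set:
 *
 *      ret = regmap_update_bits(map, MY_CHIP_CTRL_REG, 0x30, 0x10);
 *      if (ret)
 *              return ret;
 *      ret = regmap_test_bits(map, MY_CHIP_CTRL_REG, 0x10);
 */
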
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
        struct regmap *map = async->map;
        bool wake;

        trace_regmap_async_io_complete(map);

        spin_lock(&map->async_lock);
        list_move(&async->list, &map->async_free);
        wake = list_empty(&map->async_list);

        if (ret != 0)
                map->async_ret = ret;

        spin_unlock(&map->async_lock);

        if (wake)
                wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&map->async_lock, flags);
        ret = list_empty(&map->async_list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        return ret;
}

/**
 * regmap_async_complete() - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
        unsigned long flags;
        int ret;

        /* Nothing to do with no async support */
        if (!map->bus || !map->bus->async_write)
                return 0;

        trace_regmap_async_complete_start(map);

        wait_event(map->async_waitq, regmap_async_is_done(map));

        spin_lock_irqsave(&map->async_lock, flags);
        ret = map->async_ret;
        map->async_ret = 0;
        spin_unlock_irqrestore(&map->async_lock, flags);

        trace_regmap_async_complete_done(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch() - Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
                          int num_regs)
{
        struct reg_sequence *p;
        int ret;
        bool bypass;

        if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
                      num_regs))
                return 0;

        p = krealloc(map->patch,
                     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
                     GFP_KERNEL);
        if (p) {
                memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
                map->patch = p;
                map->patch_regs += num_regs;
        } else {
                return -ENOMEM;
        }

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;

        map->cache_bypass = true;
        map->async = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->async = false;
        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        regmap_async_complete(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
        if (map->format.format_write)
                return -EINVAL;

        return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
        return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
        return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
                     unsigned int *val)
{
        if (!map->format.parse_val)
                return -EINVAL;

        *val = map->format.parse_val(buf);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
        regmap_debugfs_initcall();

        return 0;
}
postcore_initcall(regmap_initcall);