// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
        return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
                                 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
                          unsigned int nranges)
{
        const struct regmap_range *r;
        int i;

        for (i = 0, r = ranges; i < nranges; i++, r++)
                if (regmap_reg_in_range(reg, r))
                        return true;
        return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
                              const struct regmap_access_table *table)
{
        /* Check "no ranges" first */
        if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
                return false;

        /* In case zero "yes ranges" are supplied, any reg is OK */
        if (!table->n_yes_ranges)
                return true;

        return regmap_reg_in_ranges(reg, table->yes_ranges,
                                    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
        if (map->max_register && reg > map->max_register)
                return false;

        if (map->writeable_reg)
                return map->writeable_reg(map->dev, reg);

        if (map->wr_table)
                return regmap_check_range_table(map, reg, map->wr_table);

        return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
        int ret;
        unsigned int val;

        if (map->cache_type == REGCACHE_NONE)
                return false;

        if (!map->cache_ops)
                return false;

        if (map->max_register && reg > map->max_register)
                return false;

        map->lock(map->lock_arg);
        ret = regcache_read(map, reg, &val);
        map->unlock(map->lock_arg);
        if (ret)
                return false;

        return true;
}
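/*
 * Example: instead of a writeable_reg() callback, a driver can describe
 * its writeable registers declaratively with an access table checked by
 * regmap_check_range_table() above.  A minimal sketch, with illustrative
 * names that are not part of this file:
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *		regmap_reg_range(0x80, 0x9f),
 *	};
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * Pointing config->wr_table at foo_wr_table makes regmap_writeable()
 * accept only registers 0x00-0x3f and 0x80-0x9f.
 */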
bool regmap_readable(struct regmap *map, unsigned int reg)
{
        if (!map->reg_read)
                return false;

        if (map->max_register && reg > map->max_register)
                return false;

        if (map->format.format_write)
                return false;

        if (map->readable_reg)
                return map->readable_reg(map->dev, reg);

        if (map->rd_table)
                return regmap_check_range_table(map, reg, map->rd_table);

        return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
        if (!map->format.format_write && !regmap_readable(map, reg))
                return false;

        if (map->volatile_reg)
                return map->volatile_reg(map->dev, reg);

        if (map->volatile_table)
                return regmap_check_range_table(map, reg, map->volatile_table);

        if (map->cache_ops)
                return false;
        else
                return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
        if (!regmap_readable(map, reg))
                return false;

        if (map->precious_reg)
                return map->precious_reg(map->dev, reg);

        if (map->precious_table)
                return regmap_check_range_table(map, reg, map->precious_table);

        return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
        if (map->writeable_noinc_reg)
                return map->writeable_noinc_reg(map->dev, reg);

        if (map->wr_noinc_table)
                return regmap_check_range_table(map, reg, map->wr_noinc_table);

        return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
        if (map->readable_noinc_reg)
                return map->readable_noinc_reg(map->dev, reg);

        if (map->rd_noinc_table)
                return regmap_check_range_table(map, reg, map->rd_noinc_table);

        return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
                                  size_t num)
{
        unsigned int i;

        for (i = 0; i < num; i++)
                if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
                        return false;

        return true;
}

static void regmap_format_2_6_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        *out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
                                     unsigned int reg, unsigned int val)
{
        __be16 *out = map->work_buf;
        *out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
        __be16 *out = map->work_buf;
        *out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
                                      unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        out[2] = val;
        out[1] = (val >> 8) | (reg << 6);
        out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
        u8 *b = buf;

        b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        u16 v = val << shift;

        memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
        u8 *b = buf;

        val <<= shift;

        b[0] = val >> 16;
        b[1] = val >> 8;
        b[2] = val;
}
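/*
 * To illustrate the packed formats above: for a device with 7-bit
 * registers and 9-bit values (reg_bits = 7, val_bits = 9), a write of
 * value 0x1ff to register 0x2a is laid out by regmap_format_7_9_write()
 * as the big-endian 16-bit word (0x2a << 9) | 0x1ff = 0x55ff, i.e. the
 * bytes 0x55 0xff on the wire.
 */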
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        u32 v = val << shift;

        memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        u64 v = (u64) val << shift;

        memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
        const u8 *b = buf;

        return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
        return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
        return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
        u16 v = get_unaligned_be16(buf);

        memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
        u16 v = get_unaligned_le16(buf);

        memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
        u16 v;

        memcpy(&v, buf, sizeof(v));
        return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
        const u8 *b = buf;
        unsigned int ret = b[2];
        ret |= ((unsigned int)b[1]) << 8;
        ret |= ((unsigned int)b[0]) << 16;

        return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
        return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
        return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
        u32 v = get_unaligned_be32(buf);

        memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
        u32 v = get_unaligned_le32(buf);

        memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
        u32 v;

        memcpy(&v, buf, sizeof(v));
        return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
        return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
        return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
        u64 v = get_unaligned_be64(buf);

        memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
        u64 v = get_unaligned_le64(buf);

        memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
        u64 v;

        memcpy(&v, buf, sizeof(v));
        return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
        struct regmap *map = __map;

        hwspin_lock_timeout(map->hwlock, UINT_MAX);
}
static void regmap_lock_hwlock_irq(void *__map)
{
        struct regmap *map = __map;

        hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
        struct regmap *map = __map;

        hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
                                    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
        struct regmap *map = __map;

        hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
        struct regmap *map = __map;

        hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
        struct regmap *map = __map;

        hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
        struct regmap *map = __map;
        mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
        struct regmap *map = __map;
        mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
        struct regmap *map = __map;
        unsigned long flags;

        spin_lock_irqsave(&map->spinlock, flags);
        map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
        struct regmap *map = __map;
        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
        /*
         * We don't actually have anything to do here; the goal here
         * is not to manage the regmap but to provide a simple way to
         * get the regmap back given a struct device.
         */
}

static bool _regmap_range_add(struct regmap *map,
                              struct regmap_range_node *data)
{
        struct rb_root *root = &map->range_tree;
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        while (*new) {
                struct regmap_range_node *this =
                        rb_entry(*new, struct regmap_range_node, node);

                parent = *new;
                if (data->range_max < this->range_min)
                        new = &((*new)->rb_left);
                else if (data->range_min > this->range_max)
                        new = &((*new)->rb_right);
                else
                        return false;
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);

        return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
                                                      unsigned int reg)
{
        struct rb_node *node = map->range_tree.rb_node;

        while (node) {
                struct regmap_range_node *this =
                        rb_entry(node, struct regmap_range_node, node);

                if (reg < this->range_min)
                        node = node->rb_left;
                else if (reg > this->range_max)
                        node = node->rb_right;
                else
                        return this;
        }

        return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regmap_range_node *range_node;

        next = rb_first(&map->range_tree);
        while (next) {
                range_node = rb_entry(next, struct regmap_range_node, node);
                next = rb_next(&range_node->node);
                rb_erase(&range_node->node, &map->range_tree);
                kfree(range_node);
        }

        kfree(map->selector_work_buf);
}
int regmap_attach_dev(struct device *dev, struct regmap *map,
                      const struct regmap_config *config)
{
        struct regmap **m;

        map->dev = dev;

        regmap_debugfs_init(map, config->name);

        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
        if (!m) {
                regmap_debugfs_exit(map);
                return -ENOMEM;
        }
        *m = map;
        devres_add(dev, m);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
                                        const struct regmap_config *config)
{
        enum regmap_endian endian;

        /* Retrieve the endianness specification from the regmap config */
        endian = config->reg_format_endian;

        /* If the regmap config specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Retrieve the endianness specification from the bus config */
        if (bus && bus->reg_format_endian_default)
                endian = bus->reg_format_endian_default;

        /* If the bus specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
                                         const struct regmap_bus *bus,
                                         const struct regmap_config *config)
{
        struct device_node *np;
        enum regmap_endian endian;

        /* Retrieve the endianness specification from the regmap config */
        endian = config->val_format_endian;

        /* If the regmap config specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* If the dev and dev->of_node exist try to get endianness from DT */
        if (dev && dev->of_node) {
                np = dev->of_node;

                /* Parse the device's DT node for an endianness specification */
                if (of_property_read_bool(np, "big-endian"))
                        endian = REGMAP_ENDIAN_BIG;
                else if (of_property_read_bool(np, "little-endian"))
                        endian = REGMAP_ENDIAN_LITTLE;
                else if (of_property_read_bool(np, "native-endian"))
                        endian = REGMAP_ENDIAN_NATIVE;

                /* If the endianness was specified in DT, use that */
                if (endian != REGMAP_ENDIAN_DEFAULT)
                        return endian;
        }

        /* Retrieve the endianness specification from the bus config */
        if (bus && bus->val_format_endian_default)
                endian = bus->val_format_endian_default;

        /* If the bus specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
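/*
 * Example: regmap_get_val_endian() lets the value endianness come from
 * the device tree rather than the config.  A node such as the following
 * (illustrative only) would make the map use little-endian values even
 * when the bus default is big-endian:
 *
 *	codec@1a {
 *		compatible = "vendor,foo-codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * The precedence is: regmap_config, then the DT properties, then the
 * bus default, and finally REGMAP_ENDIAN_BIG.
 */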
struct regmap *__regmap_init(struct device *dev,
                             const struct regmap_bus *bus,
                             void *bus_context,
                             const struct regmap_config *config,
                             struct lock_class_key *lock_key,
                             const char *lock_name)
{
        struct regmap *map;
        int ret = -EINVAL;
        enum regmap_endian reg_endian, val_endian;
        int i, j;

        if (!config)
                goto err;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        if (config->name) {
                map->name = kstrdup_const(config->name, GFP_KERNEL);
                if (!map->name) {
                        ret = -ENOMEM;
                        goto err_map;
                }
        }

        if (config->disable_locking) {
                map->lock = map->unlock = regmap_lock_unlock_none;
                regmap_debugfs_disable(map);
        } else if (config->lock && config->unlock) {
                map->lock = config->lock;
                map->unlock = config->unlock;
                map->lock_arg = config->lock_arg;
        } else if (config->use_hwlock) {
                map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
                if (!map->hwlock) {
                        ret = -ENXIO;
                        goto err_name;
                }

                switch (config->hwlock_mode) {
                case HWLOCK_IRQSTATE:
                        map->lock = regmap_lock_hwlock_irqsave;
                        map->unlock = regmap_unlock_hwlock_irqrestore;
                        break;
                case HWLOCK_IRQ:
                        map->lock = regmap_lock_hwlock_irq;
                        map->unlock = regmap_unlock_hwlock_irq;
                        break;
                default:
                        map->lock = regmap_lock_hwlock;
                        map->unlock = regmap_unlock_hwlock;
                        break;
                }

                map->lock_arg = map;
        } else {
                if ((bus && bus->fast_io) ||
                    config->fast_io) {
                        spin_lock_init(&map->spinlock);
                        map->lock = regmap_lock_spinlock;
                        map->unlock = regmap_unlock_spinlock;
                        lockdep_set_class_and_name(&map->spinlock,
                                                   lock_key, lock_name);
                } else {
                        mutex_init(&map->mutex);
                        map->lock = regmap_lock_mutex;
                        map->unlock = regmap_unlock_mutex;
                        lockdep_set_class_and_name(&map->mutex,
                                                   lock_key, lock_name);
                }
                map->lock_arg = map;
        }

        /*
         * When we write in fast-paths with regmap_bulk_write() don't allocate
         * scratch buffers with sleeping allocations.
         */
        if ((bus && bus->fast_io) || config->fast_io)
                map->alloc_flags = GFP_ATOMIC;
        else
                map->alloc_flags = GFP_KERNEL;

        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
        map->format.pad_bytes = config->pad_bits / 8;
        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
        map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
                        config->val_bits + config->pad_bits, 8);
        map->reg_shift = config->pad_bits % 8;
        if (config->reg_stride)
                map->reg_stride = config->reg_stride;
        else
                map->reg_stride = 1;
        if (is_power_of_2(map->reg_stride))
                map->reg_stride_order = ilog2(map->reg_stride);
        else
                map->reg_stride_order = -1;
        map->use_single_read = config->use_single_read || !bus || !bus->read;
        map->use_single_write = config->use_single_write || !bus || !bus->write;
        map->can_multi_write = config->can_multi_write && bus && bus->write;
        if (bus) {
                map->max_raw_read = bus->max_raw_read;
                map->max_raw_write = bus->max_raw_write;
        }
        map->dev = dev;
        map->bus = bus;
        map->bus_context = bus_context;
        map->max_register = config->max_register;
        map->wr_table = config->wr_table;
        map->rd_table = config->rd_table;
        map->volatile_table = config->volatile_table;
        map->precious_table = config->precious_table;
        map->wr_noinc_table = config->wr_noinc_table;
        map->rd_noinc_table = config->rd_noinc_table;
        map->writeable_reg = config->writeable_reg;
        map->readable_reg = config->readable_reg;
        map->volatile_reg = config->volatile_reg;
        map->precious_reg = config->precious_reg;
        map->writeable_noinc_reg = config->writeable_noinc_reg;
        map->readable_noinc_reg = config->readable_noinc_reg;
        map->cache_type = config->cache_type;

        spin_lock_init(&map->async_lock);
        INIT_LIST_HEAD(&map->async_list);
        INIT_LIST_HEAD(&map->async_free);
        init_waitqueue_head(&map->async_waitq);

        if (config->read_flag_mask ||
            config->write_flag_mask ||
            config->zero_flag_mask) {
                map->read_flag_mask = config->read_flag_mask;
                map->write_flag_mask = config->write_flag_mask;
        } else if (bus) {
                map->read_flag_mask = bus->read_flag_mask;
        }

        if (!bus) {
                map->reg_read  = config->reg_read;
                map->reg_write = config->reg_write;

                map->defer_caching = false;
                goto skip_format_initialization;
        } else if (!bus->read || !bus->write) {
                map->reg_read = _regmap_bus_reg_read;
                map->reg_write = _regmap_bus_reg_write;
                map->reg_update_bits = bus->reg_update_bits;

                map->defer_caching = false;
                goto skip_format_initialization;
        } else {
                map->reg_read  = _regmap_bus_read;
                map->reg_update_bits = bus->reg_update_bits;
        }
        reg_endian = regmap_get_reg_endian(bus, config);
        val_endian = regmap_get_val_endian(dev, bus, config);

        switch (config->reg_bits + map->reg_shift) {
        case 2:
                switch (config->val_bits) {
                case 6:
                        map->format.format_write = regmap_format_2_6_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 4:
                switch (config->val_bits) {
                case 12:
                        map->format.format_write = regmap_format_4_12_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 7:
                switch (config->val_bits) {
                case 9:
                        map->format.format_write = regmap_format_7_9_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 10:
                switch (config->val_bits) {
                case 14:
                        map->format.format_write = regmap_format_10_14_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 8:
                map->format.format_reg = regmap_format_8;
                break;

        case 16:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_16_be;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_reg = regmap_format_16_le;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_16_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 24:
                if (reg_endian != REGMAP_ENDIAN_BIG)
                        goto err_hwlock;
                map->format.format_reg = regmap_format_24;
                break;

        case 32:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_32_be;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_reg = regmap_format_32_le;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_32_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

#ifdef CONFIG_64BIT
        case 64:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_64_be;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_reg = regmap_format_64_le;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_64_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
#endif

        default:
                goto err_hwlock;
        }

        if (val_endian == REGMAP_ENDIAN_NATIVE)
                map->format.parse_inplace = regmap_parse_inplace_noop;

        switch (config->val_bits) {
        case 8:
                map->format.format_val = regmap_format_8;
                map->format.parse_val = regmap_parse_8;
                map->format.parse_inplace = regmap_parse_inplace_noop;
                break;
        case 16:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_16_be;
                        map->format.parse_val = regmap_parse_16_be;
                        map->format.parse_inplace = regmap_parse_16_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_16_le;
                        map->format.parse_val = regmap_parse_16_le;
                        map->format.parse_inplace = regmap_parse_16_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_16_native;
                        map->format.parse_val = regmap_parse_16_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
        case 24:
                if (val_endian != REGMAP_ENDIAN_BIG)
                        goto err_hwlock;
                map->format.format_val = regmap_format_24;
                map->format.parse_val = regmap_parse_24;
                break;
        case 32:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_32_be;
                        map->format.parse_val = regmap_parse_32_be;
                        map->format.parse_inplace = regmap_parse_32_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_32_le;
                        map->format.parse_val = regmap_parse_32_le;
                        map->format.parse_inplace = regmap_parse_32_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_32_native;
                        map->format.parse_val = regmap_parse_32_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
#ifdef CONFIG_64BIT
        case 64:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_64_be;
                        map->format.parse_val = regmap_parse_64_be;
                        map->format.parse_inplace = regmap_parse_64_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_64_le;
                        map->format.parse_val = regmap_parse_64_le;
                        map->format.parse_inplace = regmap_parse_64_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_64_native;
                        map->format.parse_val = regmap_parse_64_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
#endif
        }

        if (map->format.format_write) {
                if ((reg_endian != REGMAP_ENDIAN_BIG) ||
                    (val_endian != REGMAP_ENDIAN_BIG))
                        goto err_hwlock;
                map->use_single_write = true;
        }

        if (!map->format.format_write &&
            !(map->format.format_reg && map->format.format_val))
                goto err_hwlock;

        map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
                goto err_hwlock;
        }

        if (map->format.format_write) {
                map->defer_caching = false;
                map->reg_write = _regmap_bus_formatted_write;
        } else if (map->format.format_val) {
                map->defer_caching = true;
                map->reg_write = _regmap_bus_raw_write;
        }

skip_format_initialization:

        map->range_tree = RB_ROOT;
        for (i = 0; i < config->num_ranges; i++) {
                const struct regmap_range_cfg *range_cfg = &config->ranges[i];
                struct regmap_range_node *new;

                /* Sanity check */
                if (range_cfg->range_max < range_cfg->range_min) {
                        dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
                                range_cfg->range_max, range_cfg->range_min);
                        goto err_range;
                }

                if (range_cfg->range_max > map->max_register) {
                        dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
                                range_cfg->range_max, map->max_register);
                        goto err_range;
                }

                if (range_cfg->selector_reg > map->max_register) {
                        dev_err(map->dev,
                                "Invalid range %d: selector out of map\n", i);
                        goto err_range;
                }

                if (range_cfg->window_len == 0) {
                        dev_err(map->dev, "Invalid range %d: window_len 0\n",
                                i);
                        goto err_range;
                }

                /* Make sure that this register range has no selector
                 * or data window within its boundary */
                for (j = 0; j < config->num_ranges; j++) {
                        unsigned sel_reg = config->ranges[j].selector_reg;
                        unsigned win_min = config->ranges[j].window_start;
                        unsigned win_max = win_min +
                                           config->ranges[j].window_len - 1;

                        /* Allow data window inside its own virtual range */
                        if (j == i)
                                continue;
                        if (range_cfg->range_min <= sel_reg &&
                            sel_reg <= range_cfg->range_max) {
                                dev_err(map->dev,
                                        "Range %d: selector for %d in window\n",
                                        i, j);
                                goto err_range;
                        }

                        if (!(win_max < range_cfg->range_min ||
                              win_min > range_cfg->range_max)) {
                                dev_err(map->dev,
                                        "Range %d: window for %d in window\n",
                                        i, j);
                                goto err_range;
                        }
                }

                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL) {
                        ret = -ENOMEM;
                        goto err_range;
                }

                new->map = map;
                new->name = range_cfg->name;
                new->range_min = range_cfg->range_min;
                new->range_max = range_cfg->range_max;
                new->selector_reg = range_cfg->selector_reg;
                new->selector_mask = range_cfg->selector_mask;
                new->selector_shift = range_cfg->selector_shift;
                new->window_start = range_cfg->window_start;
                new->window_len = range_cfg->window_len;

                if (!_regmap_range_add(map, new)) {
                        dev_err(map->dev, "Failed to add range %d\n", i);
                        kfree(new);
                        goto err_range;
                }

                if (map->selector_work_buf == NULL) {
                        map->selector_work_buf =
                                kzalloc(map->format.buf_size, GFP_KERNEL);
                        if (map->selector_work_buf == NULL) {
                                ret = -ENOMEM;
                                goto err_range;
                        }
                }
        }

        ret = regcache_init(map, config);
        if (ret != 0)
                goto err_range;

        if (dev) {
                ret = regmap_attach_dev(dev, map, config);
                if (ret != 0)
                        goto err_regcache;
        } else {
                regmap_debugfs_init(map, config->name);
        }

        return map;

err_regcache:
        regcache_exit(map);
err_range:
        regmap_range_exit(map);
        kfree(map->work_buf);
err_hwlock:
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
err_name:
        kfree_const(map->name);
err_map:
        kfree(map);
err:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
        regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
                                  const struct regmap_bus *bus,
                                  void *bus_context,
                                  const struct regmap_config *config,
                                  struct lock_class_key *lock_key,
                                  const char *lock_name)
{
        struct regmap **ptr, *regmap;

        ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        regmap = __regmap_init(dev, bus, bus_context, config,
                               lock_key, lock_name);
        if (!IS_ERR(regmap)) {
                *ptr = regmap;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
        struct regmap *regmap, struct reg_field reg_field)
{
        rm_field->regmap = regmap;
        rm_field->reg = reg_field.reg;
        rm_field->shift = reg_field.lsb;
        rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
        rm_field->id_size = reg_field.id_size;
        rm_field->id_offset = reg_field.id_offset;
}
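/*
 * Example: a register field is usually described with the REG_FIELD()
 * helper and then allocated against an existing regmap.  A minimal
 * sketch, with illustrative names that are not part of this file:
 *
 *	static const struct reg_field foo_en_field = REG_FIELD(0x30, 1, 1);
 *	...
 *	struct regmap_field *en = devm_regmap_field_alloc(dev, map,
 *							  foo_en_field);
 *	if (IS_ERR(en))
 *		return PTR_ERR(en);
 *	regmap_field_write(en, 1);	// touches only bit 1 of 0x30
 */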
/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
                struct regmap *regmap, struct reg_field reg_field)
{
        struct regmap_field *rm_field = devm_kzalloc(dev,
                                        sizeof(*rm_field), GFP_KERNEL);
        if (!rm_field)
                return ERR_PTR(-ENOMEM);

        regmap_field_init(rm_field, regmap, reg_field);

        return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().  Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
        struct regmap_field *field)
{
        devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
                struct reg_field reg_field)
{
        struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

        if (!rm_field)
                return ERR_PTR(-ENOMEM);

        regmap_field_init(rm_field, regmap, reg_field);

        return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
        kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
        regcache_exit(map);
        regmap_debugfs_exit(map);

        map->max_register = config->max_register;
        map->writeable_reg = config->writeable_reg;
        map->readable_reg = config->readable_reg;
        map->volatile_reg = config->volatile_reg;
        map->precious_reg = config->precious_reg;
        map->writeable_noinc_reg = config->writeable_noinc_reg;
        map->readable_noinc_reg = config->readable_noinc_reg;
        map->cache_type = config->cache_type;

        regmap_debugfs_init(map, config->name);

        map->cache_bypass = false;
        map->cache_only = false;

        return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
        struct regmap_async *async;

        regcache_exit(map);
        regmap_debugfs_exit(map);
        regmap_range_exit(map);
        if (map->bus && map->bus->free_context)
                map->bus->free_context(map->bus_context);
        kfree(map->work_buf);
        while (!list_empty(&map->async_free)) {
                async = list_first_entry_or_null(&map->async_free,
                                                 struct regmap_async,
                                                 list);
                list_del(&async->list);
                kfree(async->work_buf);
                kfree(async);
        }
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
        kfree_const(map->name);
        kfree(map->patch);
        kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
        struct regmap **r = res;
        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }

        /* If the user didn't specify a name match any */
        if (data)
                return (*r)->name == data;
        else
                return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
        struct regmap **r = devres_find(dev, dev_get_regmap_release,
                                        dev_get_regmap_match, (void *)name);

        if (!r)
                return NULL;
        return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
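/*
 * Example: an MFD child driver typically looks up its parent's regmap
 * rather than creating one of its own.  A sketch with illustrative
 * names:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */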
/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
        return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
                               struct regmap_range_node *range,
                               unsigned int val_num)
{
        void *orig_work_buf;
        unsigned int win_offset;
        unsigned int win_page;
        bool page_chg;
        int ret;

        win_offset = (*reg - range->range_min) % range->window_len;
        win_page = (*reg - range->range_min) / range->window_len;

        if (val_num > 1) {
                /* Bulk write shouldn't cross range boundary */
                if (*reg + val_num - 1 > range->range_max)
                        return -EINVAL;

                /* ... or single page boundary */
                if (val_num > range->window_len - win_offset)
                        return -EINVAL;
        }

        /* It is possible to have the selector register inside the data
         * window.  In that case the selector register is present on every
         * page and needs no page switching when accessed alone. */
        if (val_num > 1 ||
            range->window_start + win_offset != range->selector_reg) {
                /* Use separate work_buf during page switching */
                orig_work_buf = map->work_buf;
                map->work_buf = map->selector_work_buf;

                ret = _regmap_update_bits(map, range->selector_reg,
                                          range->selector_mask,
                                          win_page << range->selector_shift,
                                          &page_chg, false);

                map->work_buf = orig_work_buf;

                if (ret != 0)
                        return ret;
        }

        *reg = range->window_start + win_offset;

        return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
                                          unsigned long mask)
{
        u8 *buf;
        int i;

        if (!mask || !map->work_buf)
                return;

        buf = map->work_buf;

        for (i = 0; i < max_bytes; i++)
                buf[i] |= (mask >> (8 * i)) & 0xff;
}
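/*
 * Example: _regmap_select_page() above implements the indirect ("paged")
 * access windows described by regmap_range_cfg.  A device that maps
 * registers 0x100-0x4ff through a 0x100-register window at 0x40, with
 * the page selected by bits [1:0] of register 0x3f, could be described
 * as follows (illustrative names):
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.range_min = 0x100,
 *			.range_max = 0x4ff,
 *			.selector_reg = 0x3f,
 *			.selector_mask = 0x3,
 *			.selector_shift = 0,
 *			.window_start = 0x40,
 *			.window_len = 0x100,
 *		},
 *	};
 *
 * together with .ranges = foo_ranges and
 * .num_ranges = ARRAY_SIZE(foo_ranges) in the regmap_config.
 */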
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                                  const void *val, size_t val_len)
{
        struct regmap_range_node *range;
        unsigned long flags;
        void *work_val = map->work_buf + map->format.reg_bytes +
                map->format.pad_bytes;
        void *buf;
        int ret = -ENOTSUPP;
        size_t len;
        int i;

        WARN_ON(!map->bus);

        /* Check for unwritable or noinc registers in range
         * before we start
         */
        if (!regmap_writeable_noinc(map, reg)) {
                for (i = 0; i < val_len / map->format.val_bytes; i++) {
                        unsigned int element =
                                reg + regmap_get_offset(map, i);
                        if (!regmap_writeable(map, element) ||
                            regmap_writeable_noinc(map, element))
                                return -EINVAL;
                }
        }

        if (!map->cache_bypass && map->format.parse_val) {
                unsigned int ival;
                int val_bytes = map->format.val_bytes;
                for (i = 0; i < val_len / val_bytes; i++) {
                        ival = map->format.parse_val(val + (i * val_bytes));
                        ret = regcache_write(map,
                                             reg + regmap_get_offset(map, i),
                                             ival);
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
                                        reg + i, ret);
                                return ret;
                        }
                }
                if (map->cache_only) {
                        map->cache_dirty = true;
                        return 0;
                }
        }

        range = _regmap_range_lookup(map, reg);
        if (range) {
                int val_num = val_len / map->format.val_bytes;
                int win_offset = (reg - range->range_min) % range->window_len;
                int win_residue = range->window_len - win_offset;

                /* If the write goes beyond the end of the window split it */
                while (val_num > win_residue) {
                        dev_dbg(map->dev, "Writing window %d/%zu\n",
                                win_residue, val_len / map->format.val_bytes);
                        ret = _regmap_raw_write_impl(map, reg, val,
                                                     win_residue *
                                                     map->format.val_bytes);
                        if (ret != 0)
                                return ret;

                        reg += win_residue;
                        val_num -= win_residue;
                        val += win_residue * map->format.val_bytes;
                        val_len -= win_residue * map->format.val_bytes;

                        win_offset = (reg - range->range_min) %
                                range->window_len;
                        win_residue = range->window_len - win_offset;
                }

                ret = _regmap_select_page(map, &reg, range, val_num);
                if (ret != 0)
                        return ret;
        }

        map->format.format_reg(map->work_buf, reg, map->reg_shift);
        regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
                                      map->write_flag_mask);

        /*
         * Essentially all I/O mechanisms will be faster with a single
         * buffer to write.  Since register syncs often generate raw
         * writes of single registers optimise that case.
         */
        if (val != work_val && val_len == map->format.val_bytes) {
                memcpy(work_val, val, map->format.val_bytes);
                val = work_val;
        }

        if (map->async && map->bus->async_write) {
                struct regmap_async *async;

                trace_regmap_async_write_start(map, reg, val_len);

                spin_lock_irqsave(&map->async_lock, flags);
                async = list_first_entry_or_null(&map->async_free,
                                                 struct regmap_async,
                                                 list);
                if (async)
                        list_del(&async->list);
                spin_unlock_irqrestore(&map->async_lock, flags);

                if (!async) {
                        async = map->bus->async_alloc();
                        if (!async)
                                return -ENOMEM;

                        async->work_buf = kzalloc(map->format.buf_size,
                                                  GFP_KERNEL | GFP_DMA);
                        if (!async->work_buf) {
                                kfree(async);
                                return -ENOMEM;
                        }
                }

                async->map = map;

                /* If the caller supplied the value we can use it safely. */
                memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
                       map->format.reg_bytes + map->format.val_bytes);

                spin_lock_irqsave(&map->async_lock, flags);
                list_add_tail(&async->list, &map->async_list);
                spin_unlock_irqrestore(&map->async_lock, flags);

                if (val != work_val)
                        ret = map->bus->async_write(map->bus_context,
                                                    async->work_buf,
                                                    map->format.reg_bytes +
                                                    map->format.pad_bytes,
                                                    val, val_len, async);
                else
                        ret = map->bus->async_write(map->bus_context,
                                                    async->work_buf,
                                                    map->format.reg_bytes +
                                                    map->format.pad_bytes +
                                                    val_len, NULL, 0, async);

                if (ret != 0) {
                        dev_err(map->dev, "Failed to schedule write: %d\n",
                                ret);

                        spin_lock_irqsave(&map->async_lock, flags);
                        list_move(&async->list, &map->async_free);
                        spin_unlock_irqrestore(&map->async_lock, flags);
                }

                return ret;
        }

        trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

        /* If we're doing a single register write we can probably just
         * send the work_buf directly, otherwise try to do a gather
         * write.
         */
        if (val == work_val)
                ret = map->bus->write(map->bus_context, map->work_buf,
                                      map->format.reg_bytes +
                                      map->format.pad_bytes +
                                      val_len);
        else if (map->bus->gather_write)
                ret = map->bus->gather_write(map->bus_context, map->work_buf,
                                             map->format.reg_bytes +
                                             map->format.pad_bytes,
                                             val, val_len);
        else
                ret = -ENOTSUPP;
        /* If that didn't work fall back on linearising by hand. */
        if (ret == -ENOTSUPP) {
                len = map->format.reg_bytes + map->format.pad_bytes + val_len;
                buf = kzalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                memcpy(buf, map->work_buf, map->format.reg_bytes);
                memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
                       val, val_len);
                ret = map->bus->write(map->bus_context, buf, len);

                kfree(buf);
        } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
                /* regcache_drop_region() takes lock that we already have,
                 * thus call map->cache_ops->drop() directly
                 */
                if (map->cache_ops && map->cache_ops->drop)
                        map->cache_ops->drop(map, reg, reg + 1);
        }

        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

        return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
        return map->bus && map->bus->write && map->format.format_val &&
                map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
        return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
        return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                       unsigned int val)
{
        int ret;
        struct regmap_range_node *range;
        struct regmap *map = context;

        WARN_ON(!map->bus || !map->format.format_write);

        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range, 1);
                if (ret != 0)
                        return ret;
        }

        map->format.format_write(map, reg, val);

        trace_regmap_hw_write_start(map, reg, 1);

        ret = map->bus->write(map->bus_context, map->work_buf,
                              map->format.buf_size);

        trace_regmap_hw_write_done(map, reg, 1);

        return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
                                 unsigned int val)
{
        struct regmap *map = context;

        return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val)
{
        struct regmap *map = context;

        WARN_ON(!map->bus || !map->format.format_val);

        map->format.format_val(map->work_buf + map->format.reg_bytes
                               + map->format.pad_bytes, val, 0);
        return _regmap_raw_write_impl(map, reg,
                                      map->work_buf +
                                      map->format.reg_bytes +
                                      map->format.pad_bytes,
                                      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
        return (map->bus) ? map : map->bus_context;
}
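/*
 * Example: when there is no struct regmap_bus, the config supplies
 * register-level accessors directly and _regmap_map_get_context()
 * hands bus_context straight to them.  A sketch, assuming a
 * hypothetical foo_mailbox_read() transport helper:
 *
 *	static int foo_reg_read(void *context, unsigned int reg,
 *				unsigned int *val)
 *	{
 *		return foo_mailbox_read(context, reg, val);
 *	}
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.reg_read = foo_reg_read,
 *		.reg_write = foo_reg_write,
 *	};
 */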
int _regmap_write(struct regmap *map, unsigned int reg,
                  unsigned int val)
{
        int ret;
        void *context = _regmap_map_get_context(map);

        if (!regmap_writeable(map, reg))
                return -EIO;

        if (!map->cache_bypass && !map->defer_caching) {
                ret = regcache_write(map, reg, val);
                if (ret != 0)
                        return ret;
                if (map->cache_only) {
                        map->cache_dirty = true;
                        return 0;
                }
        }

        if (regmap_should_log(map))
                dev_info(map->dev, "%x <= %x\n", reg, val);

        trace_regmap_reg_write(map, reg, val);

        return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
        int ret;

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_write(map, reg, val);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
        int ret;

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_write(map, reg, val);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
                      const void *val, size_t val_len)
{
        size_t val_bytes = map->format.val_bytes;
        size_t val_count = val_len / val_bytes;
        size_t chunk_count, chunk_bytes;
        size_t chunk_regs = val_count;
        int ret, i;

        if (!val_count)
                return -EINVAL;

        if (map->use_single_write)
                chunk_regs = 1;
        else if (map->max_raw_write && val_len > map->max_raw_write)
                chunk_regs = map->max_raw_write / val_bytes;

        chunk_count = val_count / chunk_regs;
        chunk_bytes = chunk_regs * val_bytes;

        /* Write as many bytes as possible with chunk_size */
        for (i = 0; i < chunk_count; i++) {
                ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
                if (ret)
                        return ret;

                reg += regmap_get_offset(map, chunk_regs);
                val += chunk_bytes;
                val_len -= chunk_bytes;
        }

        /* Write remaining bytes */
        if (val_len)
                ret = _regmap_raw_write_impl(map, reg, val, val_len);

        return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
                     const void *val, size_t val_len)
{
        int ret;

        if (!regmap_can_raw_write(map))
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_raw_write(map, reg, val, val_len);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
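/*
 * Example: with val_bits = 16 and big-endian values, downloading two
 * coefficients 0x1234 and 0xabcd to consecutive registers starting at
 * REG_COEFF (an illustrative name) is a single raw write of four bytes
 * already laid out in device order:
 *
 *	static const u8 coeffs[] = { 0x12, 0x34, 0xab, 0xcd };
 *
 *	ret = regmap_raw_write(map, REG_COEFF, coeffs, sizeof(coeffs));
 */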
/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 *                        register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data to be written, in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers.  Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
                       const void *val, size_t val_len)
{
        size_t write_len;
        int ret;

        if (!map->bus)
                return -EINVAL;
        if (!map->bus->write)
                return -ENOTSUPP;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
        if (val_len == 0)
                return -EINVAL;

        map->lock(map->lock_arg);

        if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
                ret = -EINVAL;
                goto out_unlock;
        }

        while (val_len) {
                if (map->max_raw_write && map->max_raw_write < val_len)
                        write_len = map->max_raw_write;
                else
                        write_len = val_len;
                ret = _regmap_raw_write(map, reg, val, write_len);
                if (ret)
                        goto out_unlock;
                val = ((u8 *)val) + write_len;
                val_len -= write_len;
        }

out_unlock:
        map->unlock(map->lock_arg);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
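/*
 * Example: a device exposing a TX FIFO at a single register address can
 * stream a buffer through it (FOO_REG_TX_FIFO is an illustrative name).
 * The FIFO register must be volatile and marked writeable_noinc:
 *
 *	ret = regmap_noinc_write(map, FOO_REG_TX_FIFO, buf, len);
 *
 * Each underlying bus transfer writes up to max_raw_write bytes to the
 * same register instead of advancing through the register map.
 */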
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write regardless of the
 *         current value
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
                                  unsigned int mask, unsigned int val,
                                  bool *change, bool async, bool force)
{
        mask = (mask << field->shift) & field->mask;

        return regmap_update_bits_base(field->regmap, field->reg,
                                       mask, val << field->shift,
                                       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write regardless of the
 *         current value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
                                   unsigned int mask, unsigned int val,
                                   bool *change, bool async, bool force)
{
        if (id >= field->id_size)
                return -EINVAL;

        mask = (mask << field->shift) & field->mask;

        return regmap_update_bits_base(field->regmap,
                                       field->reg + (field->id_offset * id),
                                       mask, val << field->shift,
                                       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
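/*
 * Example: both helpers above shift the caller's mask and value into the
 * field's position.  For a field created with REG_FIELD(0x10, 4, 7)
 * (illustrative), the call
 *
 *	regmap_field_update_bits_base(field, 0xf, 0x5, NULL, false, false);
 *
 * becomes a read/modify/write of register 0x10 with mask 0xf0 and
 * value 0x50.
 */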
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple
 * transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we issue a
	 * series of single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
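/*
 * Example: a hypothetical bulk write of four 16-bit coefficients starting
 * at a base register.  "map", "MY_CHIP_COEF_BASE" and the values are
 * illustrative placeholders.
 *
 *	u16 coefs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *	int ret = regmap_bulk_write(map, MY_CHIP_COEF_BASE, coefs,
 *				    ARRAY_SIZE(coefs));
 *
 * The buffer is in native register size (val_bytes == 2 here); any
 * bus-level endian conversion is handled by the regmap format code.
 */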
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, new value) pairs in regs have not been formatted, but
 * they are all in the same page and have been converted to page-relative
 * addresses. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops the
	 * set each time the page changes. This also applies if there is
	 * a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
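/*
 * Example: a hypothetical init sequence using regmap_multi_reg_write().
 * The register names, values and the 10us settle time are illustrative
 * only.
 *
 *	static const struct reg_sequence my_chip_init[] = {
 *		{ MY_CHIP_PLL_CTRL, 0x0021, 10 },	// 10us delay after write
 *		{ MY_CHIP_CLK_DIV,  0x0004 },
 *		{ MY_CHIP_POWER,    0x0001 },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, my_chip_init,
 *				     ARRAY_SIZE(my_chip_init));
 *
 * Any delay_us entries and page boundaries split the sequence into
 * separate bus transfers, as implemented above.
 */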
/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 * device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of registers is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 * asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
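/*
 * Example: a hypothetical asynchronous firmware download.  "map", "fw"
 * and MY_CHIP_DSP_MEM are illustrative; the data buffer must remain
 * valid until regmap_async_complete() returns.
 *
 *	ret = regmap_raw_write_async(map, MY_CHIP_DSP_MEM,
 *				     fw->data, fw->size);
 *	if (ret)
 *		return ret;
 *	ret = regmap_async_complete(map);	// wait, collect any I/O error
 */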
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
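/*
 * Example: a hypothetical single-register read; "map" and MY_CHIP_STATUS
 * are placeholders.
 *
 *	unsigned int status;
 *	int ret = regmap_read(map, MY_CHIP_STATUS, &status);
 *	if (ret)
 *		return ret;
 *
 * Non-volatile registers may be satisfied from the register cache
 * without touching the bus, as implemented in _regmap_read() above.
 */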
/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
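/*
 * Example: a hypothetical raw read of eight bytes into a buffer laid out
 * exactly as the device transmits it (no endian conversion).  "map" and
 * MY_CHIP_DATA_BASE are placeholders.
 *
 *	u8 raw[8];
 *	int ret = regmap_raw_read(map, MY_CHIP_DATA_BASE, raw, sizeof(raw));
 *
 * sizeof(raw) must be a multiple of the map's val_bytes; large reads are
 * chunked according to max_raw_read, as implemented above.
 */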
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with
 * port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
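/*
 * Example: a hypothetical field read mirroring the earlier field update
 * sketch; "field" and the expected values are illustrative.
 *
 *	unsigned int mode;
 *	int ret = regmap_field_read(field, &mode);
 *
 * The value returned in "mode" is already masked and shifted down, so a
 * two-bit field yields values 0..3 regardless of its position in the
 * containing register.
 */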
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
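/*
 * Example: a hypothetical bulk read of four 16-bit measurement registers
 * into a native-endian buffer; "map" and MY_CHIP_ADC_BASE are
 * placeholders.
 *
 *	u16 samples[4];
 *	int ret = regmap_bulk_read(map, MY_CHIP_ADC_BASE, samples,
 *				   ARRAY_SIZE(samples));
 *
 * Volatile, uncached ranges take the raw-read fast path above; otherwise
 * the registers are read one at a time, typically from the cache.
 */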
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to write the register even if the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
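/*
 * Example: a hypothetical enable-bit toggle followed by a readback test;
 * "map", MY_CHIP_CTRL and MY_CHIP_CTRL_EN are illustrative.
 *
 *	// Set the enable bit, skipping the bus write if already set.
 *	ret = regmap_update_bits_base(map, MY_CHIP_CTRL,
 *				      MY_CHIP_CTRL_EN, MY_CHIP_CTRL_EN,
 *				      NULL, false, false);
 *	if (ret)
 *		return ret;
 *
 *	// Returns 1 if the bit reads back as set, 0 or -errno otherwise.
 *	ret = regmap_test_bits(map, MY_CHIP_CTRL, MY_CHIP_CTRL_EN);
 */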
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);