// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
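
/*
 * Illustrative sketch (not part of this file; names are hypothetical):
 * a driver can describe writeable registers declaratively with an
 * access table instead of a callback:
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *		regmap_reg_range(0x20, 0x2f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * With .wr_table = &foo_wr_table in the regmap_config,
 * regmap_writeable() above returns true only inside those ranges.
 */
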
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}
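
/*
 * Worked example (illustrative values, not from this file): for a
 * 7-bit register, 9-bit value device such as many SPI audio codecs,
 * writing val 0x1a3 to reg 0x02 packs as (0x02 << 9) | 0x1a3 = 0x05a3,
 * which regmap_format_7_9_write() above sends big-endian as the two
 * bytes 0x05 0xa3.
 */
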
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif
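
/*
 * Illustrative round trip (hypothetical buffer, not from this file):
 *
 *	u8 buf[2];
 *
 *	regmap_format_16_be(buf, 0x1234, 0);	// buf = { 0x12, 0x34 }
 *	regmap_parse_16_be(buf);		// returns 0x1234
 *
 * Each format_*() writer above is paired with a parse_*() reader so
 * raw bus buffers can be converted to and from CPU-order values.
 */
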
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}
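
/*
 * Sketch (hypothetical config, not from this file): a driver sharing
 * registers with another processor can route locking through a
 * hardware spinlock by setting, e.g.:
 *
 *	struct regmap_config cfg = {
 *		...
 *		.use_hwlock = true,
 *		.hwlock_id = 0,
 *		.hwlock_mode = HWLOCK_IRQSTATE,
 *	};
 *
 * __regmap_init() below then selects the regmap_lock_hwlock_irqsave()/
 * regmap_unlock_hwlock_irqrestore() pair defined above.
 */
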
int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
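
/*
 * Example (assumed device tree snippet, not from this file): given a
 * node such as
 *
 *	codec@48 {
 *		...
 *		big-endian;
 *	};
 *
 * regmap_get_val_endian() above resolves to REGMAP_ENDIAN_BIG even if
 * neither the regmap_config nor the bus expressed a preference.
 */
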
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}
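
	/*
	 * Three register I/O dispatch cases follow: no bus at all (the
	 * config supplies reg_read/reg_write directly), a bus with its
	 * own per-register reg_read/reg_write operations, or a raw
	 * byte-stream bus that needs the formatting layer set up below.
	 */
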
	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:
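
	/*
	 * Indirect (paged) ranges, validated below, let a small register
	 * window address a larger virtual range through a selector
	 * register.  A hypothetical example config (not from this file):
	 *
	 *	static const struct regmap_range_cfg foo_ranges[] = {
	 *		{
	 *			.range_min = 0x100,
	 *			.range_max = 0x4ff,
	 *			.selector_reg = 0x00,
	 *			.selector_mask = 0x0f,
	 *			.selector_shift = 0,
	 *			.window_start = 0x10,
	 *			.window_len = 0x100,
	 *		},
	 *	};
	 *
	 * Accesses to 0x100..0x4ff are redirected to the 0x10..0x10f
	 * window after the right page is written to the selector.
	 */
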
	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		 * or data window within its boundary
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}
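
/*
 * Worked example (values are illustrative): a reg_field defined as
 *
 *	struct reg_field f = REG_FIELD(0x30, 4, 7);
 *
 * yields shift = 4 and mask = GENMASK(7, 4) = 0xf0 in
 * regmap_field_init() above, so field accesses only touch bits 7:4 of
 * register 0x30.
 */
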
/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here; the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device; if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
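
/*
 * Typical use (hypothetical MFD child driver, not from this file):
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 *
 * The child reuses the regmap its parent registered; no name is needed
 * when the parent only has one map.
 */
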
/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	 * window.  In that case the selector register is present on
	 * every page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
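
/*
 * Page math example (illustrative numbers): with range_min = 0x100 and
 * window_len = 0x100, an access to *reg = 0x2c4 gives
 *
 *	win_page   = (0x2c4 - 0x100) / 0x100 = 1
 *	win_offset = (0x2c4 - 0x100) % 0x100 = 0xc4
 *
 * so page 1 is written to the selector register and the access is
 * redirected to window_start + 0xc4.
 */
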
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
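
/*
 * Basic usage sketch (hypothetical driver code, not from this file):
 *
 *	ret = regmap_write(map, FOO_CTRL_REG, 0x01);
 *	if (ret)
 *		return ret;
 *
 * FOO_CTRL_REG is an assumed register name; locking, caching and
 * formatting are all handled inside the call.
 */
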
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 *			  register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data pointed to by val, in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
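
/*
 * FIFO write sketch (hypothetical names, not from this file): if
 * FOO_FIFO_REG is marked volatile and writeable_noinc, a buffer can be
 * streamed into it with
 *
 *	ret = regmap_noinc_write(map, FOO_FIFO_REG, buf, len);
 *
 * Every value goes to the same register address instead of an
 * incrementing range.
 */
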
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the write should be done asynchronously
 * @force: Boolean indicating if the write should be forced even if unchanged
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the write should be done asynchronously
 * @force: Boolean indicating if the write should be forced even if unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
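
/*
 * Field update sketch (hypothetical field, not from this file):
 * clearing and setting bits within a previously allocated field might
 * look like
 *
 *	ret = regmap_field_update_bits_base(field, 0xf, 0x3,
 *					    NULL, false, false);
 *
 * which read-modify-writes only the bits covered by the field's mask.
 */
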
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to be page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes. This also applies if
	 * there is a delay required at any point in the sequence.
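	 *
	 * For example, if a window boundary falls between the writes to
	 * B and C, the sequence {A, B, C} is emitted as the raw block
	 * {A, B}, then a page select, then the raw block {C}.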
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay, make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0. This can't
			 * occur with page breaks as we never write on
			 * the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *				       device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register,value pairs are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *			      asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *	 device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
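 *
 * A usage sketch (illustrative only; FOO_FW_BASE and the firmware
 * buffer are hypothetical, and the buffer must stay valid until
 * completion):
 *
 *	ret = regmap_raw_write_async(map, FOO_FW_BASE, fw->data, fw->size);
 *	if (ret == 0)
 *		ret = regmap_async_complete(map);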
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
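 *
 * A usage sketch (illustrative only; FOO_CHIP_ID is a hypothetical
 * register):
 *
 *	unsigned int id;
 *
 *	ret = regmap_read(map, FOO_CHIP_ID, &id);
 *	if (ret != 0)
 *		return ret;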
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to buffer used to store the read data
 * @val_len: Size of data to read in bytes
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing
 *			 the register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
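 *
 * A usage sketch (illustrative only; FOO_FIFO is a hypothetical FIFO
 * register):
 *
 *	u8 buf[64];
 *
 *	ret = regmap_noinc_read(map, FOO_FIFO, buf, sizeof(buf));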
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
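 *
 * A usage sketch (illustrative only; FOO_COEFF_BASE is a hypothetical
 * base register on a device with 16-bit values):
 *
 *	u16 coeffs[8];
 *
 *	ret = regmap_bulk_read(map, FOO_COEFF_BASE, coeffs,
 *			       ARRAY_SIZE(coeffs));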
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the update should be done asynchronously
 * @force: Boolean indicating if the write should be forced even if the
 *	   value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with the change,
 * async and force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
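 *
 * A usage sketch (illustrative only; FOO_CTRL and FOO_CTRL_EN are
 * hypothetical, and regmap_update_bits() is the common wrapper with
 * change, async and force all disabled):
 *
 *	// Set the enable bit without disturbing the other bits
 *	ret = regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);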
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *			   on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
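 *
 * A usage sketch (illustrative only; the register addresses and values
 * are hypothetical):
 *
 *	static const struct reg_sequence foo_patch[] = {
 *		{ 0xf5, 0x01 },
 *		{ 0xf6, 0x80, 10 },	// 10us delay after this write
 *	};
 *
 *	ret = regmap_register_patch(map, foo_patch, ARRAY_SIZE(foo_patch));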
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);