1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // Register map access API 4 // 5 // Copyright 2011 Wolfson Microelectronics plc 6 // 7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 8 9 #include <linux/device.h> 10 #include <linux/slab.h> 11 #include <linux/export.h> 12 #include <linux/mutex.h> 13 #include <linux/err.h> 14 #include <linux/property.h> 15 #include <linux/rbtree.h> 16 #include <linux/sched.h> 17 #include <linux/delay.h> 18 #include <linux/log2.h> 19 #include <linux/hwspinlock.h> 20 #include <asm/unaligned.h> 21 22 #define CREATE_TRACE_POINTS 23 #include "trace.h" 24 25 #include "internal.h" 26 27 /* 28 * Sometimes for failures during very early init the trace 29 * infrastructure isn't available early enough to be used. For this 30 * sort of problem defining LOG_DEVICE will add printks for basic 31 * register I/O on a specific device. 32 */ 33 #undef LOG_DEVICE 34 35 #ifdef LOG_DEVICE 36 static inline bool regmap_should_log(struct regmap *map) 37 { 38 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0); 39 } 40 #else 41 static inline bool regmap_should_log(struct regmap *map) { return false; } 42 #endif 43 44 45 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 46 unsigned int mask, unsigned int val, 47 bool *change, bool force_write); 48 49 static int _regmap_bus_reg_read(void *context, unsigned int reg, 50 unsigned int *val); 51 static int _regmap_bus_read(void *context, unsigned int reg, 52 unsigned int *val); 53 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 54 unsigned int val); 55 static int _regmap_bus_reg_write(void *context, unsigned int reg, 56 unsigned int val); 57 static int _regmap_bus_raw_write(void *context, unsigned int reg, 58 unsigned int val); 59 60 bool regmap_reg_in_ranges(unsigned int reg, 61 const struct regmap_range *ranges, 62 unsigned int nranges) 63 { 64 const struct regmap_range *r; 65 int i; 66 67 for (i = 0, r = ranges; i < nranges; i++, r++) 68 if (regmap_reg_in_range(reg, r)) 69 return true; 70 return false; 71 } 72 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges); 73 74 bool regmap_check_range_table(struct regmap *map, unsigned int reg, 75 const struct regmap_access_table *table) 76 { 77 /* Check "no ranges" first */ 78 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) 79 return false; 80 81 /* In case zero "yes ranges" are supplied, any reg is OK */ 82 if (!table->n_yes_ranges) 83 return true; 84 85 return regmap_reg_in_ranges(reg, table->yes_ranges, 86 table->n_yes_ranges); 87 } 88 EXPORT_SYMBOL_GPL(regmap_check_range_table); 89 90 bool regmap_writeable(struct regmap *map, unsigned int reg) 91 { 92 if (map->max_register && reg > map->max_register) 93 return false; 94 95 if (map->writeable_reg) 96 return map->writeable_reg(map->dev, reg); 97 98 if (map->wr_table) 99 return regmap_check_range_table(map, reg, map->wr_table); 100 101 return true; 102 } 103 104 bool regmap_cached(struct regmap *map, unsigned int reg) 105 { 106 int ret; 107 unsigned int val; 108 109 if (map->cache_type == REGCACHE_NONE) 110 return false; 111 112 if (!map->cache_ops) 113 return false; 114 115 if (map->max_register && reg > map->max_register) 116 return false; 117 118 map->lock(map->lock_arg); 119 ret = regcache_read(map, reg, &val); 120 map->unlock(map->lock_arg); 121 if (ret) 122 return false; 123 124 return true; 125 } 126 127 bool regmap_readable(struct regmap *map, unsigned int reg) 128 { 129 if (!map->reg_read) 130 return false; 131 132 if (map->max_register && reg > map->max_register) 133 return false; 134 135 if (map->format.format_write) 136 return false; 137 138 if (map->readable_reg) 139 return map->readable_reg(map->dev, reg); 140 141 if (map->rd_table) 142 return regmap_check_range_table(map, reg, map->rd_table); 143 144 return true; 145 } 146 147 bool regmap_volatile(struct regmap *map, unsigned int reg) 148 { 149 if (!map->format.format_write && !regmap_readable(map, reg)) 150 return false; 151 152 if (map->volatile_reg) 153 return map->volatile_reg(map->dev, reg); 154 155 if (map->volatile_table) 156 return regmap_check_range_table(map, reg, map->volatile_table); 157 158 if (map->cache_ops) 159 return false; 160 else 161 return true; 162 } 163 164 bool regmap_precious(struct regmap *map, unsigned int reg) 165 { 166 if (!regmap_readable(map, reg)) 167 return false; 168 169 if (map->precious_reg) 170 return map->precious_reg(map->dev, reg); 171 172 if (map->precious_table) 173 return regmap_check_range_table(map, reg, map->precious_table); 174 175 return false; 176 } 177 178 bool regmap_writeable_noinc(struct regmap *map, unsigned int reg) 179 { 180 if (map->writeable_noinc_reg) 181 return map->writeable_noinc_reg(map->dev, reg); 182 183 if (map->wr_noinc_table) 184 return regmap_check_range_table(map, reg, map->wr_noinc_table); 185 186 return true; 187 } 188 189 bool regmap_readable_noinc(struct regmap *map, unsigned int reg) 190 { 191 if (map->readable_noinc_reg) 192 return map->readable_noinc_reg(map->dev, reg); 193 194 if (map->rd_noinc_table) 195 return regmap_check_range_table(map, reg, map->rd_noinc_table); 196 197 return true; 198 } 199 200 static bool regmap_volatile_range(struct regmap *map, unsigned int reg, 201 size_t num) 202 { 203 unsigned int i; 204 205 for (i = 0; i < num; i++) 206 if (!regmap_volatile(map, reg + regmap_get_offset(map, i))) 207 return false; 208 209 return true; 210 } 211 212 static void regmap_format_12_20_write(struct regmap *map, 213 unsigned int reg, unsigned int val) 214 { 215 u8 *out = map->work_buf; 216 217 out[0] = reg >> 4; 218 out[1] = (reg << 4) | (val >> 16); 219 out[2] = val >> 8; 220 out[3] = val; 221 } 222 223 224 static void regmap_format_2_6_write(struct regmap *map, 225 unsigned int reg, unsigned int val) 226 { 227 u8 *out = map->work_buf; 228 229 *out = (reg << 6) | val; 230 } 231 232 static void regmap_format_4_12_write(struct regmap *map, 233 unsigned int reg, unsigned int val) 234 { 235 __be16 *out = map->work_buf; 236 *out = cpu_to_be16((reg << 12) | val); 237 } 238 239 static void regmap_format_7_9_write(struct regmap *map, 240 unsigned int reg, unsigned int val) 241 { 242 __be16 *out = map->work_buf; 243 *out = cpu_to_be16((reg << 9) | val); 244 } 245 246 static void regmap_format_7_17_write(struct regmap *map, 247 unsigned int reg, unsigned int val) 248 { 249 u8 *out = map->work_buf; 250 251 out[2] = val; 252 out[1] = val >> 8; 253 out[0] = (val >> 16) | (reg << 1); 254 } 255 256 static void regmap_format_10_14_write(struct regmap *map, 257 unsigned int reg, unsigned int val) 258 { 259 u8 *out = map->work_buf; 260 261 out[2] = val; 262 out[1] = (val >> 8) | (reg << 6); 263 out[0] = reg >> 2; 264 } 265 266 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) 267 { 268 u8 *b = buf; 269 270 b[0] = val << shift; 271 } 272 273 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) 274 { 275 put_unaligned_be16(val << shift, buf); 276 } 277 278 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) 279 { 280 put_unaligned_le16(val << shift, buf); 281 } 282 283 static void regmap_format_16_native(void *buf, unsigned int val, 284 unsigned int shift) 285 { 286 u16 v = val << shift; 287 288 memcpy(buf, &v, sizeof(v)); 289 } 290 291 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) 292 { 293 u8 *b = buf; 294 295 val <<= shift; 296 297 b[0] = val >> 16; 298 b[1] = val >> 8; 299 b[2] = val; 300 } 301 302 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) 303 { 304 put_unaligned_be32(val << shift, buf); 305 } 306 307 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) 308 { 309 put_unaligned_le32(val << shift, buf); 310 } 311 312 static void regmap_format_32_native(void *buf, unsigned int val, 313 unsigned int shift) 314 { 315 u32 v = val << shift; 316 317 memcpy(buf, &v, sizeof(v)); 318 } 319 320 #ifdef CONFIG_64BIT 321 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) 322 { 323 put_unaligned_be64((u64) val << shift, buf); 324 } 325 326 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) 327 { 328 put_unaligned_le64((u64) val << shift, buf); 329 } 330 331 static void regmap_format_64_native(void *buf, unsigned int val, 332 unsigned int shift) 333 { 334 u64 v = (u64) val << shift; 335 336 memcpy(buf, &v, sizeof(v)); 337 } 338 #endif 339 340 static void regmap_parse_inplace_noop(void *buf) 341 { 342 } 343 344 static unsigned int regmap_parse_8(const void *buf) 345 { 346 const u8 *b = buf; 347 348 return b[0]; 349 } 350 351 static unsigned int regmap_parse_16_be(const void *buf) 352 { 353 return get_unaligned_be16(buf); 354 } 355 356 static unsigned int regmap_parse_16_le(const void *buf) 357 { 358 return get_unaligned_le16(buf); 359 } 360 361 static void regmap_parse_16_be_inplace(void *buf) 362 { 363 u16 v = get_unaligned_be16(buf); 364 365 memcpy(buf, &v, sizeof(v)); 366 } 367 368 static void regmap_parse_16_le_inplace(void *buf) 369 { 370 u16 v = get_unaligned_le16(buf); 371 372 memcpy(buf, &v, sizeof(v)); 373 } 374 375 static unsigned int regmap_parse_16_native(const void *buf) 376 { 377 u16 v; 378 379 memcpy(&v, buf, sizeof(v)); 380 return v; 381 } 382 383 static unsigned int regmap_parse_24(const void *buf) 384 { 385 const u8 *b = buf; 386 unsigned int ret = b[2]; 387 ret |= ((unsigned int)b[1]) << 8; 388 ret |= ((unsigned int)b[0]) << 16; 389 390 return ret; 391 } 392 393 static unsigned int regmap_parse_32_be(const void *buf) 394 { 395 return get_unaligned_be32(buf); 396 } 397 398 static unsigned int regmap_parse_32_le(const void *buf) 399 { 400 return get_unaligned_le32(buf); 401 } 402 403 static void regmap_parse_32_be_inplace(void *buf) 404 { 405 u32 v = get_unaligned_be32(buf); 406 407 memcpy(buf, &v, sizeof(v)); 408 } 409 410 static void regmap_parse_32_le_inplace(void *buf) 411 { 412 u32 v = get_unaligned_le32(buf); 413 414 memcpy(buf, &v, sizeof(v)); 415 } 416 417 static unsigned int regmap_parse_32_native(const void *buf) 418 { 419 u32 v; 420 421 memcpy(&v, buf, sizeof(v)); 422 return v; 423 } 424 425 #ifdef CONFIG_64BIT 426 static unsigned int regmap_parse_64_be(const void *buf) 427 { 428 return get_unaligned_be64(buf); 429 } 430 431 static unsigned int regmap_parse_64_le(const void *buf) 432 { 433 return get_unaligned_le64(buf); 434 } 435 436 static void regmap_parse_64_be_inplace(void *buf) 437 { 438 u64 v = get_unaligned_be64(buf); 439 440 memcpy(buf, &v, sizeof(v)); 441 } 442 443 static void regmap_parse_64_le_inplace(void *buf) 444 { 445 u64 v = get_unaligned_le64(buf); 446 447 memcpy(buf, &v, sizeof(v)); 448 } 449 450 static unsigned int regmap_parse_64_native(const void *buf) 451 { 452 u64 v; 453 454 memcpy(&v, buf, sizeof(v)); 455 return v; 456 } 457 #endif 458 459 static void regmap_lock_hwlock(void *__map) 460 { 461 struct regmap *map = __map; 462 463 hwspin_lock_timeout(map->hwlock, UINT_MAX); 464 } 465 466 static void regmap_lock_hwlock_irq(void *__map) 467 { 468 struct regmap *map = __map; 469 470 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX); 471 } 472 473 static void regmap_lock_hwlock_irqsave(void *__map) 474 { 475 struct regmap *map = __map; 476 477 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX, 478 &map->spinlock_flags); 479 } 480 481 static void regmap_unlock_hwlock(void *__map) 482 { 483 struct regmap *map = __map; 484 485 hwspin_unlock(map->hwlock); 486 } 487 488 static void regmap_unlock_hwlock_irq(void *__map) 489 { 490 struct regmap *map = __map; 491 492 hwspin_unlock_irq(map->hwlock); 493 } 494 495 static void regmap_unlock_hwlock_irqrestore(void *__map) 496 { 497 struct regmap *map = __map; 498 499 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags); 500 } 501 502 static void regmap_lock_unlock_none(void *__map) 503 { 504 505 } 506 507 static void regmap_lock_mutex(void *__map) 508 { 509 struct regmap *map = __map; 510 mutex_lock(&map->mutex); 511 } 512 513 static void regmap_unlock_mutex(void *__map) 514 { 515 struct regmap *map = __map; 516 mutex_unlock(&map->mutex); 517 } 518 519 static void regmap_lock_spinlock(void *__map) 520 __acquires(&map->spinlock) 521 { 522 struct regmap *map = __map; 523 unsigned long flags; 524 525 spin_lock_irqsave(&map->spinlock, flags); 526 map->spinlock_flags = flags; 527 } 528 529 static void regmap_unlock_spinlock(void *__map) 530 __releases(&map->spinlock) 531 { 532 struct regmap *map = __map; 533 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); 534 } 535 536 static void regmap_lock_raw_spinlock(void *__map) 537 __acquires(&map->raw_spinlock) 538 { 539 struct regmap *map = __map; 540 unsigned long flags; 541 542 raw_spin_lock_irqsave(&map->raw_spinlock, flags); 543 map->raw_spinlock_flags = flags; 544 } 545 546 static void regmap_unlock_raw_spinlock(void *__map) 547 __releases(&map->raw_spinlock) 548 { 549 struct regmap *map = __map; 550 raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags); 551 } 552 553 static void dev_get_regmap_release(struct device *dev, void *res) 554 { 555 /* 556 * We don't actually have anything to do here; the goal here 557 * is not to manage the regmap but to provide a simple way to 558 * get the regmap back given a struct device. 559 */ 560 } 561 562 static bool _regmap_range_add(struct regmap *map, 563 struct regmap_range_node *data) 564 { 565 struct rb_root *root = &map->range_tree; 566 struct rb_node **new = &(root->rb_node), *parent = NULL; 567 568 while (*new) { 569 struct regmap_range_node *this = 570 rb_entry(*new, struct regmap_range_node, node); 571 572 parent = *new; 573 if (data->range_max < this->range_min) 574 new = &((*new)->rb_left); 575 else if (data->range_min > this->range_max) 576 new = &((*new)->rb_right); 577 else 578 return false; 579 } 580 581 rb_link_node(&data->node, parent, new); 582 rb_insert_color(&data->node, root); 583 584 return true; 585 } 586 587 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, 588 unsigned int reg) 589 { 590 struct rb_node *node = map->range_tree.rb_node; 591 592 while (node) { 593 struct regmap_range_node *this = 594 rb_entry(node, struct regmap_range_node, node); 595 596 if (reg < this->range_min) 597 node = node->rb_left; 598 else if (reg > this->range_max) 599 node = node->rb_right; 600 else 601 return this; 602 } 603 604 return NULL; 605 } 606 607 static void regmap_range_exit(struct regmap *map) 608 { 609 struct rb_node *next; 610 struct regmap_range_node *range_node; 611 612 next = rb_first(&map->range_tree); 613 while (next) { 614 range_node = rb_entry(next, struct regmap_range_node, node); 615 next = rb_next(&range_node->node); 616 rb_erase(&range_node->node, &map->range_tree); 617 kfree(range_node); 618 } 619 620 kfree(map->selector_work_buf); 621 } 622 623 static int regmap_set_name(struct regmap *map, const struct regmap_config *config) 624 { 625 if (config->name) { 626 const char *name = kstrdup_const(config->name, GFP_KERNEL); 627 628 if (!name) 629 return -ENOMEM; 630 631 kfree_const(map->name); 632 map->name = name; 633 } 634 635 return 0; 636 } 637 638 int regmap_attach_dev(struct device *dev, struct regmap *map, 639 const struct regmap_config *config) 640 { 641 struct regmap **m; 642 int ret; 643 644 map->dev = dev; 645 646 ret = regmap_set_name(map, config); 647 if (ret) 648 return ret; 649 650 regmap_debugfs_exit(map); 651 regmap_debugfs_init(map); 652 653 /* Add a devres resource for dev_get_regmap() */ 654 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 655 if (!m) { 656 regmap_debugfs_exit(map); 657 return -ENOMEM; 658 } 659 *m = map; 660 devres_add(dev, m); 661 662 return 0; 663 } 664 EXPORT_SYMBOL_GPL(regmap_attach_dev); 665 666 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, 667 const struct regmap_config *config) 668 { 669 enum regmap_endian endian; 670 671 /* Retrieve the endianness specification from the regmap config */ 672 endian = config->reg_format_endian; 673 674 /* If the regmap config specified a non-default value, use that */ 675 if (endian != REGMAP_ENDIAN_DEFAULT) 676 return endian; 677 678 /* Retrieve the endianness specification from the bus config */ 679 if (bus && bus->reg_format_endian_default) 680 endian = bus->reg_format_endian_default; 681 682 /* If the bus specified a non-default value, use that */ 683 if (endian != REGMAP_ENDIAN_DEFAULT) 684 return endian; 685 686 /* Use this if no other value was found */ 687 return REGMAP_ENDIAN_BIG; 688 } 689 690 enum regmap_endian regmap_get_val_endian(struct device *dev, 691 const struct regmap_bus *bus, 692 const struct regmap_config *config) 693 { 694 struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; 695 enum regmap_endian endian; 696 697 /* Retrieve the endianness specification from the regmap config */ 698 endian = config->val_format_endian; 699 700 /* If the regmap config specified a non-default value, use that */ 701 if (endian != REGMAP_ENDIAN_DEFAULT) 702 return endian; 703 704 /* If the firmware node exist try to get endianness from it */ 705 if (fwnode_property_read_bool(fwnode, "big-endian")) 706 endian = REGMAP_ENDIAN_BIG; 707 else if (fwnode_property_read_bool(fwnode, "little-endian")) 708 endian = REGMAP_ENDIAN_LITTLE; 709 else if (fwnode_property_read_bool(fwnode, "native-endian")) 710 endian = REGMAP_ENDIAN_NATIVE; 711 712 /* If the endianness was specified in fwnode, use that */ 713 if (endian != REGMAP_ENDIAN_DEFAULT) 714 return endian; 715 716 /* Retrieve the endianness specification from the bus config */ 717 if (bus && bus->val_format_endian_default) 718 endian = bus->val_format_endian_default; 719 720 /* If the bus specified a non-default value, use that */ 721 if (endian != REGMAP_ENDIAN_DEFAULT) 722 return endian; 723 724 /* Use this if no other value was found */ 725 return REGMAP_ENDIAN_BIG; 726 } 727 EXPORT_SYMBOL_GPL(regmap_get_val_endian); 728 729 struct regmap *__regmap_init(struct device *dev, 730 const struct regmap_bus *bus, 731 void *bus_context, 732 const struct regmap_config *config, 733 struct lock_class_key *lock_key, 734 const char *lock_name) 735 { 736 struct regmap *map; 737 int ret = -EINVAL; 738 enum regmap_endian reg_endian, val_endian; 739 int i, j; 740 741 if (!config) 742 goto err; 743 744 map = kzalloc(sizeof(*map), GFP_KERNEL); 745 if (map == NULL) { 746 ret = -ENOMEM; 747 goto err; 748 } 749 750 ret = regmap_set_name(map, config); 751 if (ret) 752 goto err_map; 753 754 ret = -EINVAL; /* Later error paths rely on this */ 755 756 if (config->disable_locking) { 757 map->lock = map->unlock = regmap_lock_unlock_none; 758 map->can_sleep = config->can_sleep; 759 regmap_debugfs_disable(map); 760 } else if (config->lock && config->unlock) { 761 map->lock = config->lock; 762 map->unlock = config->unlock; 763 map->lock_arg = config->lock_arg; 764 map->can_sleep = config->can_sleep; 765 } else if (config->use_hwlock) { 766 map->hwlock = hwspin_lock_request_specific(config->hwlock_id); 767 if (!map->hwlock) { 768 ret = -ENXIO; 769 goto err_name; 770 } 771 772 switch (config->hwlock_mode) { 773 case HWLOCK_IRQSTATE: 774 map->lock = regmap_lock_hwlock_irqsave; 775 map->unlock = regmap_unlock_hwlock_irqrestore; 776 break; 777 case HWLOCK_IRQ: 778 map->lock = regmap_lock_hwlock_irq; 779 map->unlock = regmap_unlock_hwlock_irq; 780 break; 781 default: 782 map->lock = regmap_lock_hwlock; 783 map->unlock = regmap_unlock_hwlock; 784 break; 785 } 786 787 map->lock_arg = map; 788 } else { 789 if ((bus && bus->fast_io) || 790 config->fast_io) { 791 if (config->use_raw_spinlock) { 792 raw_spin_lock_init(&map->raw_spinlock); 793 map->lock = regmap_lock_raw_spinlock; 794 map->unlock = regmap_unlock_raw_spinlock; 795 lockdep_set_class_and_name(&map->raw_spinlock, 796 lock_key, lock_name); 797 } else { 798 spin_lock_init(&map->spinlock); 799 map->lock = regmap_lock_spinlock; 800 map->unlock = regmap_unlock_spinlock; 801 lockdep_set_class_and_name(&map->spinlock, 802 lock_key, lock_name); 803 } 804 } else { 805 mutex_init(&map->mutex); 806 map->lock = regmap_lock_mutex; 807 map->unlock = regmap_unlock_mutex; 808 map->can_sleep = true; 809 lockdep_set_class_and_name(&map->mutex, 810 lock_key, lock_name); 811 } 812 map->lock_arg = map; 813 } 814 815 /* 816 * When we write in fast-paths with regmap_bulk_write() don't allocate 817 * scratch buffers with sleeping allocations. 818 */ 819 if ((bus && bus->fast_io) || config->fast_io) 820 map->alloc_flags = GFP_ATOMIC; 821 else 822 map->alloc_flags = GFP_KERNEL; 823 824 map->reg_base = config->reg_base; 825 826 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 827 map->format.pad_bytes = config->pad_bits / 8; 828 map->format.reg_downshift = config->reg_downshift; 829 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 830 map->format.buf_size = DIV_ROUND_UP(config->reg_bits + 831 config->val_bits + config->pad_bits, 8); 832 map->reg_shift = config->pad_bits % 8; 833 if (config->reg_stride) 834 map->reg_stride = config->reg_stride; 835 else 836 map->reg_stride = 1; 837 if (is_power_of_2(map->reg_stride)) 838 map->reg_stride_order = ilog2(map->reg_stride); 839 else 840 map->reg_stride_order = -1; 841 map->use_single_read = config->use_single_read || !bus || !bus->read; 842 map->use_single_write = config->use_single_write || !bus || !bus->write; 843 map->can_multi_write = config->can_multi_write && bus && bus->write; 844 if (bus) { 845 map->max_raw_read = bus->max_raw_read; 846 map->max_raw_write = bus->max_raw_write; 847 } 848 map->dev = dev; 849 map->bus = bus; 850 map->bus_context = bus_context; 851 map->max_register = config->max_register; 852 map->wr_table = config->wr_table; 853 map->rd_table = config->rd_table; 854 map->volatile_table = config->volatile_table; 855 map->precious_table = config->precious_table; 856 map->wr_noinc_table = config->wr_noinc_table; 857 map->rd_noinc_table = config->rd_noinc_table; 858 map->writeable_reg = config->writeable_reg; 859 map->readable_reg = config->readable_reg; 860 map->volatile_reg = config->volatile_reg; 861 map->precious_reg = config->precious_reg; 862 map->writeable_noinc_reg = config->writeable_noinc_reg; 863 map->readable_noinc_reg = config->readable_noinc_reg; 864 map->cache_type = config->cache_type; 865 866 spin_lock_init(&map->async_lock); 867 INIT_LIST_HEAD(&map->async_list); 868 INIT_LIST_HEAD(&map->async_free); 869 init_waitqueue_head(&map->async_waitq); 870 871 if (config->read_flag_mask || 872 config->write_flag_mask || 873 config->zero_flag_mask) { 874 map->read_flag_mask = config->read_flag_mask; 875 map->write_flag_mask = config->write_flag_mask; 876 } else if (bus) { 877 map->read_flag_mask = bus->read_flag_mask; 878 } 879 880 if (!bus) { 881 map->reg_read = config->reg_read; 882 map->reg_write = config->reg_write; 883 map->reg_update_bits = config->reg_update_bits; 884 885 map->defer_caching = false; 886 goto skip_format_initialization; 887 } else if (!bus->read || !bus->write) { 888 map->reg_read = _regmap_bus_reg_read; 889 map->reg_write = _regmap_bus_reg_write; 890 map->reg_update_bits = bus->reg_update_bits; 891 892 map->defer_caching = false; 893 goto skip_format_initialization; 894 } else { 895 map->reg_read = _regmap_bus_read; 896 map->reg_update_bits = bus->reg_update_bits; 897 } 898 899 reg_endian = regmap_get_reg_endian(bus, config); 900 val_endian = regmap_get_val_endian(dev, bus, config); 901 902 switch (config->reg_bits + map->reg_shift) { 903 case 2: 904 switch (config->val_bits) { 905 case 6: 906 map->format.format_write = regmap_format_2_6_write; 907 break; 908 default: 909 goto err_hwlock; 910 } 911 break; 912 913 case 4: 914 switch (config->val_bits) { 915 case 12: 916 map->format.format_write = regmap_format_4_12_write; 917 break; 918 default: 919 goto err_hwlock; 920 } 921 break; 922 923 case 7: 924 switch (config->val_bits) { 925 case 9: 926 map->format.format_write = regmap_format_7_9_write; 927 break; 928 case 17: 929 map->format.format_write = regmap_format_7_17_write; 930 break; 931 default: 932 goto err_hwlock; 933 } 934 break; 935 936 case 10: 937 switch (config->val_bits) { 938 case 14: 939 map->format.format_write = regmap_format_10_14_write; 940 break; 941 default: 942 goto err_hwlock; 943 } 944 break; 945 946 case 12: 947 switch (config->val_bits) { 948 case 20: 949 map->format.format_write = regmap_format_12_20_write; 950 break; 951 default: 952 goto err_hwlock; 953 } 954 break; 955 956 case 8: 957 map->format.format_reg = regmap_format_8; 958 break; 959 960 case 16: 961 switch (reg_endian) { 962 case REGMAP_ENDIAN_BIG: 963 map->format.format_reg = regmap_format_16_be; 964 break; 965 case REGMAP_ENDIAN_LITTLE: 966 map->format.format_reg = regmap_format_16_le; 967 break; 968 case REGMAP_ENDIAN_NATIVE: 969 map->format.format_reg = regmap_format_16_native; 970 break; 971 default: 972 goto err_hwlock; 973 } 974 break; 975 976 case 24: 977 if (reg_endian != REGMAP_ENDIAN_BIG) 978 goto err_hwlock; 979 map->format.format_reg = regmap_format_24; 980 break; 981 982 case 32: 983 switch (reg_endian) { 984 case REGMAP_ENDIAN_BIG: 985 map->format.format_reg = regmap_format_32_be; 986 break; 987 case REGMAP_ENDIAN_LITTLE: 988 map->format.format_reg = regmap_format_32_le; 989 break; 990 case REGMAP_ENDIAN_NATIVE: 991 map->format.format_reg = regmap_format_32_native; 992 break; 993 default: 994 goto err_hwlock; 995 } 996 break; 997 998 #ifdef CONFIG_64BIT 999 case 64: 1000 switch (reg_endian) { 1001 case REGMAP_ENDIAN_BIG: 1002 map->format.format_reg = regmap_format_64_be; 1003 break; 1004 case REGMAP_ENDIAN_LITTLE: 1005 map->format.format_reg = regmap_format_64_le; 1006 break; 1007 case REGMAP_ENDIAN_NATIVE: 1008 map->format.format_reg = regmap_format_64_native; 1009 break; 1010 default: 1011 goto err_hwlock; 1012 } 1013 break; 1014 #endif 1015 1016 default: 1017 goto err_hwlock; 1018 } 1019 1020 if (val_endian == REGMAP_ENDIAN_NATIVE) 1021 map->format.parse_inplace = regmap_parse_inplace_noop; 1022 1023 switch (config->val_bits) { 1024 case 8: 1025 map->format.format_val = regmap_format_8; 1026 map->format.parse_val = regmap_parse_8; 1027 map->format.parse_inplace = regmap_parse_inplace_noop; 1028 break; 1029 case 16: 1030 switch (val_endian) { 1031 case REGMAP_ENDIAN_BIG: 1032 map->format.format_val = regmap_format_16_be; 1033 map->format.parse_val = regmap_parse_16_be; 1034 map->format.parse_inplace = regmap_parse_16_be_inplace; 1035 break; 1036 case REGMAP_ENDIAN_LITTLE: 1037 map->format.format_val = regmap_format_16_le; 1038 map->format.parse_val = regmap_parse_16_le; 1039 map->format.parse_inplace = regmap_parse_16_le_inplace; 1040 break; 1041 case REGMAP_ENDIAN_NATIVE: 1042 map->format.format_val = regmap_format_16_native; 1043 map->format.parse_val = regmap_parse_16_native; 1044 break; 1045 default: 1046 goto err_hwlock; 1047 } 1048 break; 1049 case 24: 1050 if (val_endian != REGMAP_ENDIAN_BIG) 1051 goto err_hwlock; 1052 map->format.format_val = regmap_format_24; 1053 map->format.parse_val = regmap_parse_24; 1054 break; 1055 case 32: 1056 switch (val_endian) { 1057 case REGMAP_ENDIAN_BIG: 1058 map->format.format_val = regmap_format_32_be; 1059 map->format.parse_val = regmap_parse_32_be; 1060 map->format.parse_inplace = regmap_parse_32_be_inplace; 1061 break; 1062 case REGMAP_ENDIAN_LITTLE: 1063 map->format.format_val = regmap_format_32_le; 1064 map->format.parse_val = regmap_parse_32_le; 1065 map->format.parse_inplace = regmap_parse_32_le_inplace; 1066 break; 1067 case REGMAP_ENDIAN_NATIVE: 1068 map->format.format_val = regmap_format_32_native; 1069 map->format.parse_val = regmap_parse_32_native; 1070 break; 1071 default: 1072 goto err_hwlock; 1073 } 1074 break; 1075 #ifdef CONFIG_64BIT 1076 case 64: 1077 switch (val_endian) { 1078 case REGMAP_ENDIAN_BIG: 1079 map->format.format_val = regmap_format_64_be; 1080 map->format.parse_val = regmap_parse_64_be; 1081 map->format.parse_inplace = regmap_parse_64_be_inplace; 1082 break; 1083 case REGMAP_ENDIAN_LITTLE: 1084 map->format.format_val = regmap_format_64_le; 1085 map->format.parse_val = regmap_parse_64_le; 1086 map->format.parse_inplace = regmap_parse_64_le_inplace; 1087 break; 1088 case REGMAP_ENDIAN_NATIVE: 1089 map->format.format_val = regmap_format_64_native; 1090 map->format.parse_val = regmap_parse_64_native; 1091 break; 1092 default: 1093 goto err_hwlock; 1094 } 1095 break; 1096 #endif 1097 } 1098 1099 if (map->format.format_write) { 1100 if ((reg_endian != REGMAP_ENDIAN_BIG) || 1101 (val_endian != REGMAP_ENDIAN_BIG)) 1102 goto err_hwlock; 1103 map->use_single_write = true; 1104 } 1105 1106 if (!map->format.format_write && 1107 !(map->format.format_reg && map->format.format_val)) 1108 goto err_hwlock; 1109 1110 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); 1111 if (map->work_buf == NULL) { 1112 ret = -ENOMEM; 1113 goto err_hwlock; 1114 } 1115 1116 if (map->format.format_write) { 1117 map->defer_caching = false; 1118 map->reg_write = _regmap_bus_formatted_write; 1119 } else if (map->format.format_val) { 1120 map->defer_caching = true; 1121 map->reg_write = _regmap_bus_raw_write; 1122 } 1123 1124 skip_format_initialization: 1125 1126 map->range_tree = RB_ROOT; 1127 for (i = 0; i < config->num_ranges; i++) { 1128 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 1129 struct regmap_range_node *new; 1130 1131 /* Sanity check */ 1132 if (range_cfg->range_max < range_cfg->range_min) { 1133 dev_err(map->dev, "Invalid range %d: %d < %d\n", i, 1134 range_cfg->range_max, range_cfg->range_min); 1135 goto err_range; 1136 } 1137 1138 if (range_cfg->range_max > map->max_register) { 1139 dev_err(map->dev, "Invalid range %d: %d > %d\n", i, 1140 range_cfg->range_max, map->max_register); 1141 goto err_range; 1142 } 1143 1144 if (range_cfg->selector_reg > map->max_register) { 1145 dev_err(map->dev, 1146 "Invalid range %d: selector out of map\n", i); 1147 goto err_range; 1148 } 1149 1150 if (range_cfg->window_len == 0) { 1151 dev_err(map->dev, "Invalid range %d: window_len 0\n", 1152 i); 1153 goto err_range; 1154 } 1155 1156 /* Make sure, that this register range has no selector 1157 or data window within its boundary */ 1158 for (j = 0; j < config->num_ranges; j++) { 1159 unsigned int sel_reg = config->ranges[j].selector_reg; 1160 unsigned int win_min = config->ranges[j].window_start; 1161 unsigned int win_max = win_min + 1162 config->ranges[j].window_len - 1; 1163 1164 /* Allow data window inside its own virtual range */ 1165 if (j == i) 1166 continue; 1167 1168 if (range_cfg->range_min <= sel_reg && 1169 sel_reg <= range_cfg->range_max) { 1170 dev_err(map->dev, 1171 "Range %d: selector for %d in window\n", 1172 i, j); 1173 goto err_range; 1174 } 1175 1176 if (!(win_max < range_cfg->range_min || 1177 win_min > range_cfg->range_max)) { 1178 dev_err(map->dev, 1179 "Range %d: window for %d in window\n", 1180 i, j); 1181 goto err_range; 1182 } 1183 } 1184 1185 new = kzalloc(sizeof(*new), GFP_KERNEL); 1186 if (new == NULL) { 1187 ret = -ENOMEM; 1188 goto err_range; 1189 } 1190 1191 new->map = map; 1192 new->name = range_cfg->name; 1193 new->range_min = range_cfg->range_min; 1194 new->range_max = range_cfg->range_max; 1195 new->selector_reg = range_cfg->selector_reg; 1196 new->selector_mask = range_cfg->selector_mask; 1197 new->selector_shift = range_cfg->selector_shift; 1198 new->window_start = range_cfg->window_start; 1199 new->window_len = range_cfg->window_len; 1200 1201 if (!_regmap_range_add(map, new)) { 1202 dev_err(map->dev, "Failed to add range %d\n", i); 1203 kfree(new); 1204 goto err_range; 1205 } 1206 1207 if (map->selector_work_buf == NULL) { 1208 map->selector_work_buf = 1209 kzalloc(map->format.buf_size, GFP_KERNEL); 1210 if (map->selector_work_buf == NULL) { 1211 ret = -ENOMEM; 1212 goto err_range; 1213 } 1214 } 1215 } 1216 1217 ret = regcache_init(map, config); 1218 if (ret != 0) 1219 goto err_range; 1220 1221 if (dev) { 1222 ret = regmap_attach_dev(dev, map, config); 1223 if (ret != 0) 1224 goto err_regcache; 1225 } else { 1226 regmap_debugfs_init(map); 1227 } 1228 1229 return map; 1230 1231 err_regcache: 1232 regcache_exit(map); 1233 err_range: 1234 regmap_range_exit(map); 1235 kfree(map->work_buf); 1236 err_hwlock: 1237 if (map->hwlock) 1238 hwspin_lock_free(map->hwlock); 1239 err_name: 1240 kfree_const(map->name); 1241 err_map: 1242 kfree(map); 1243 err: 1244 return ERR_PTR(ret); 1245 } 1246 EXPORT_SYMBOL_GPL(__regmap_init); 1247 1248 static void devm_regmap_release(struct device *dev, void *res) 1249 { 1250 regmap_exit(*(struct regmap **)res); 1251 } 1252 1253 struct regmap *__devm_regmap_init(struct device *dev, 1254 const struct regmap_bus *bus, 1255 void *bus_context, 1256 const struct regmap_config *config, 1257 struct lock_class_key *lock_key, 1258 const char *lock_name) 1259 { 1260 struct regmap **ptr, *regmap; 1261 1262 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); 1263 if (!ptr) 1264 return ERR_PTR(-ENOMEM); 1265 1266 regmap = __regmap_init(dev, bus, bus_context, config, 1267 lock_key, lock_name); 1268 if (!IS_ERR(regmap)) { 1269 *ptr = regmap; 1270 devres_add(dev, ptr); 1271 } else { 1272 devres_free(ptr); 1273 } 1274 1275 return regmap; 1276 } 1277 EXPORT_SYMBOL_GPL(__devm_regmap_init); 1278 1279 static void regmap_field_init(struct regmap_field *rm_field, 1280 struct regmap *regmap, struct reg_field reg_field) 1281 { 1282 rm_field->regmap = regmap; 1283 rm_field->reg = reg_field.reg; 1284 rm_field->shift = reg_field.lsb; 1285 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); 1286 rm_field->id_size = reg_field.id_size; 1287 rm_field->id_offset = reg_field.id_offset; 1288 } 1289 1290 /** 1291 * devm_regmap_field_alloc() - Allocate and initialise a register field. 1292 * 1293 * @dev: Device that will be interacted with 1294 * @regmap: regmap bank in which this register field is located. 1295 * @reg_field: Register field with in the bank. 1296 * 1297 * The return value will be an ERR_PTR() on error or a valid pointer 1298 * to a struct regmap_field. The regmap_field will be automatically freed 1299 * by the device management code. 1300 */ 1301 struct regmap_field *devm_regmap_field_alloc(struct device *dev, 1302 struct regmap *regmap, struct reg_field reg_field) 1303 { 1304 struct regmap_field *rm_field = devm_kzalloc(dev, 1305 sizeof(*rm_field), GFP_KERNEL); 1306 if (!rm_field) 1307 return ERR_PTR(-ENOMEM); 1308 1309 regmap_field_init(rm_field, regmap, reg_field); 1310 1311 return rm_field; 1312 1313 } 1314 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc); 1315 1316 1317 /** 1318 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field. 1319 * 1320 * @regmap: regmap bank in which this register field is located. 1321 * @rm_field: regmap register fields within the bank. 1322 * @reg_field: Register fields within the bank. 1323 * @num_fields: Number of register fields. 1324 * 1325 * The return value will be an -ENOMEM on error or zero for success. 1326 * Newly allocated regmap_fields should be freed by calling 1327 * regmap_field_bulk_free() 1328 */ 1329 int regmap_field_bulk_alloc(struct regmap *regmap, 1330 struct regmap_field **rm_field, 1331 const struct reg_field *reg_field, 1332 int num_fields) 1333 { 1334 struct regmap_field *rf; 1335 int i; 1336 1337 rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL); 1338 if (!rf) 1339 return -ENOMEM; 1340 1341 for (i = 0; i < num_fields; i++) { 1342 regmap_field_init(&rf[i], regmap, reg_field[i]); 1343 rm_field[i] = &rf[i]; 1344 } 1345 1346 return 0; 1347 } 1348 EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc); 1349 1350 /** 1351 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register 1352 * fields. 1353 * 1354 * @dev: Device that will be interacted with 1355 * @regmap: regmap bank in which this register field is located. 1356 * @rm_field: regmap register fields within the bank. 1357 * @reg_field: Register fields within the bank. 1358 * @num_fields: Number of register fields. 1359 * 1360 * The return value will be an -ENOMEM on error or zero for success. 1361 * Newly allocated regmap_fields will be automatically freed by the 1362 * device management code. 1363 */ 1364 int devm_regmap_field_bulk_alloc(struct device *dev, 1365 struct regmap *regmap, 1366 struct regmap_field **rm_field, 1367 const struct reg_field *reg_field, 1368 int num_fields) 1369 { 1370 struct regmap_field *rf; 1371 int i; 1372 1373 rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL); 1374 if (!rf) 1375 return -ENOMEM; 1376 1377 for (i = 0; i < num_fields; i++) { 1378 regmap_field_init(&rf[i], regmap, reg_field[i]); 1379 rm_field[i] = &rf[i]; 1380 } 1381 1382 return 0; 1383 } 1384 EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc); 1385 1386 /** 1387 * regmap_field_bulk_free() - Free register field allocated using 1388 * regmap_field_bulk_alloc. 1389 * 1390 * @field: regmap fields which should be freed. 1391 */ 1392 void regmap_field_bulk_free(struct regmap_field *field) 1393 { 1394 kfree(field); 1395 } 1396 EXPORT_SYMBOL_GPL(regmap_field_bulk_free); 1397 1398 /** 1399 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using 1400 * devm_regmap_field_bulk_alloc. 1401 * 1402 * @dev: Device that will be interacted with 1403 * @field: regmap field which should be freed. 1404 * 1405 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually 1406 * drivers need not call this function, as the memory allocated via devm 1407 * will be freed as per device-driver life-cycle. 1408 */ 1409 void devm_regmap_field_bulk_free(struct device *dev, 1410 struct regmap_field *field) 1411 { 1412 devm_kfree(dev, field); 1413 } 1414 EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free); 1415 1416 /** 1417 * devm_regmap_field_free() - Free a register field allocated using 1418 * devm_regmap_field_alloc. 1419 * 1420 * @dev: Device that will be interacted with 1421 * @field: regmap field which should be freed. 1422 * 1423 * Free register field allocated using devm_regmap_field_alloc(). Usually 1424 * drivers need not call this function, as the memory allocated via devm 1425 * will be freed as per device-driver life-cyle. 1426 */ 1427 void devm_regmap_field_free(struct device *dev, 1428 struct regmap_field *field) 1429 { 1430 devm_kfree(dev, field); 1431 } 1432 EXPORT_SYMBOL_GPL(devm_regmap_field_free); 1433 1434 /** 1435 * regmap_field_alloc() - Allocate and initialise a register field. 1436 * 1437 * @regmap: regmap bank in which this register field is located. 1438 * @reg_field: Register field with in the bank. 1439 * 1440 * The return value will be an ERR_PTR() on error or a valid pointer 1441 * to a struct regmap_field. The regmap_field should be freed by the 1442 * user once its finished working with it using regmap_field_free(). 1443 */ 1444 struct regmap_field *regmap_field_alloc(struct regmap *regmap, 1445 struct reg_field reg_field) 1446 { 1447 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL); 1448 1449 if (!rm_field) 1450 return ERR_PTR(-ENOMEM); 1451 1452 regmap_field_init(rm_field, regmap, reg_field); 1453 1454 return rm_field; 1455 } 1456 EXPORT_SYMBOL_GPL(regmap_field_alloc); 1457 1458 /** 1459 * regmap_field_free() - Free register field allocated using 1460 * regmap_field_alloc. 1461 * 1462 * @field: regmap field which should be freed. 1463 */ 1464 void regmap_field_free(struct regmap_field *field) 1465 { 1466 kfree(field); 1467 } 1468 EXPORT_SYMBOL_GPL(regmap_field_free); 1469 1470 /** 1471 * regmap_reinit_cache() - Reinitialise the current register cache 1472 * 1473 * @map: Register map to operate on. 1474 * @config: New configuration. Only the cache data will be used. 1475 * 1476 * Discard any existing register cache for the map and initialize a 1477 * new cache. This can be used to restore the cache to defaults or to 1478 * update the cache configuration to reflect runtime discovery of the 1479 * hardware. 1480 * 1481 * No explicit locking is done here, the user needs to ensure that 1482 * this function will not race with other calls to regmap. 1483 */ 1484 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) 1485 { 1486 int ret; 1487 1488 regcache_exit(map); 1489 regmap_debugfs_exit(map); 1490 1491 map->max_register = config->max_register; 1492 map->writeable_reg = config->writeable_reg; 1493 map->readable_reg = config->readable_reg; 1494 map->volatile_reg = config->volatile_reg; 1495 map->precious_reg = config->precious_reg; 1496 map->writeable_noinc_reg = config->writeable_noinc_reg; 1497 map->readable_noinc_reg = config->readable_noinc_reg; 1498 map->cache_type = config->cache_type; 1499 1500 ret = regmap_set_name(map, config); 1501 if (ret) 1502 return ret; 1503 1504 regmap_debugfs_init(map); 1505 1506 map->cache_bypass = false; 1507 map->cache_only = false; 1508 1509 return regcache_init(map, config); 1510 } 1511 EXPORT_SYMBOL_GPL(regmap_reinit_cache); 1512 1513 /** 1514 * regmap_exit() - Free a previously allocated register map 1515 * 1516 * @map: Register map to operate on. 1517 */ 1518 void regmap_exit(struct regmap *map) 1519 { 1520 struct regmap_async *async; 1521 1522 regcache_exit(map); 1523 regmap_debugfs_exit(map); 1524 regmap_range_exit(map); 1525 if (map->bus && map->bus->free_context) 1526 map->bus->free_context(map->bus_context); 1527 kfree(map->work_buf); 1528 while (!list_empty(&map->async_free)) { 1529 async = list_first_entry_or_null(&map->async_free, 1530 struct regmap_async, 1531 list); 1532 list_del(&async->list); 1533 kfree(async->work_buf); 1534 kfree(async); 1535 } 1536 if (map->hwlock) 1537 hwspin_lock_free(map->hwlock); 1538 if (map->lock == regmap_lock_mutex) 1539 mutex_destroy(&map->mutex); 1540 kfree_const(map->name); 1541 kfree(map->patch); 1542 if (map->bus && map->bus->free_on_exit) 1543 kfree(map->bus); 1544 kfree(map); 1545 } 1546 EXPORT_SYMBOL_GPL(regmap_exit); 1547 1548 static int dev_get_regmap_match(struct device *dev, void *res, void *data) 1549 { 1550 struct regmap **r = res; 1551 if (!r || !*r) { 1552 WARN_ON(!r || !*r); 1553 return 0; 1554 } 1555 1556 /* If the user didn't specify a name match any */ 1557 if (data) 1558 return !strcmp((*r)->name, data); 1559 else 1560 return 1; 1561 } 1562 1563 /** 1564 * dev_get_regmap() - Obtain the regmap (if any) for a device 1565 * 1566 * @dev: Device to retrieve the map for 1567 * @name: Optional name for the register map, usually NULL. 1568 * 1569 * Returns the regmap for the device if one is present, or NULL. If 1570 * name is specified then it must match the name specified when 1571 * registering the device, if it is NULL then the first regmap found 1572 * will be used. Devices with multiple register maps are very rare, 1573 * generic code should normally not need to specify a name. 1574 */ 1575 struct regmap *dev_get_regmap(struct device *dev, const char *name) 1576 { 1577 struct regmap **r = devres_find(dev, dev_get_regmap_release, 1578 dev_get_regmap_match, (void *)name); 1579 1580 if (!r) 1581 return NULL; 1582 return *r; 1583 } 1584 EXPORT_SYMBOL_GPL(dev_get_regmap); 1585 1586 /** 1587 * regmap_get_device() - Obtain the device from a regmap 1588 * 1589 * @map: Register map to operate on. 1590 * 1591 * Returns the underlying device that the regmap has been created for. 1592 */ 1593 struct device *regmap_get_device(struct regmap *map) 1594 { 1595 return map->dev; 1596 } 1597 EXPORT_SYMBOL_GPL(regmap_get_device); 1598 1599 static int _regmap_select_page(struct regmap *map, unsigned int *reg, 1600 struct regmap_range_node *range, 1601 unsigned int val_num) 1602 { 1603 void *orig_work_buf; 1604 unsigned int win_offset; 1605 unsigned int win_page; 1606 bool page_chg; 1607 int ret; 1608 1609 win_offset = (*reg - range->range_min) % range->window_len; 1610 win_page = (*reg - range->range_min) / range->window_len; 1611 1612 if (val_num > 1) { 1613 /* Bulk write shouldn't cross range boundary */ 1614 if (*reg + val_num - 1 > range->range_max) 1615 return -EINVAL; 1616 1617 /* ... or single page boundary */ 1618 if (val_num > range->window_len - win_offset) 1619 return -EINVAL; 1620 } 1621 1622 /* It is possible to have selector register inside data window. 1623 In that case, selector register is located on every page and 1624 it needs no page switching, when accessed alone. */ 1625 if (val_num > 1 || 1626 range->window_start + win_offset != range->selector_reg) { 1627 /* Use separate work_buf during page switching */ 1628 orig_work_buf = map->work_buf; 1629 map->work_buf = map->selector_work_buf; 1630 1631 ret = _regmap_update_bits(map, range->selector_reg, 1632 range->selector_mask, 1633 win_page << range->selector_shift, 1634 &page_chg, false); 1635 1636 map->work_buf = orig_work_buf; 1637 1638 if (ret != 0) 1639 return ret; 1640 } 1641 1642 *reg = range->window_start + win_offset; 1643 1644 return 0; 1645 } 1646 1647 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, 1648 unsigned long mask) 1649 { 1650 u8 *buf; 1651 int i; 1652 1653 if (!mask || !map->work_buf) 1654 return; 1655 1656 buf = map->work_buf; 1657 1658 for (i = 0; i < max_bytes; i++) 1659 buf[i] |= (mask >> (8 * i)) & 0xff; 1660 } 1661 1662 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, 1663 const void *val, size_t val_len, bool noinc) 1664 { 1665 struct regmap_range_node *range; 1666 unsigned long flags; 1667 void *work_val = map->work_buf + map->format.reg_bytes + 1668 map->format.pad_bytes; 1669 void *buf; 1670 int ret = -ENOTSUPP; 1671 size_t len; 1672 int i; 1673 1674 WARN_ON(!map->bus); 1675 1676 /* Check for unwritable or noinc registers in range 1677 * before we start 1678 */ 1679 if (!regmap_writeable_noinc(map, reg)) { 1680 for (i = 0; i < val_len / map->format.val_bytes; i++) { 1681 unsigned int element = 1682 reg + regmap_get_offset(map, i); 1683 if (!regmap_writeable(map, element) || 1684 regmap_writeable_noinc(map, element)) 1685 return -EINVAL; 1686 } 1687 } 1688 1689 if (!map->cache_bypass && map->format.parse_val) { 1690 unsigned int ival; 1691 int val_bytes = map->format.val_bytes; 1692 for (i = 0; i < val_len / val_bytes; i++) { 1693 ival = map->format.parse_val(val + (i * val_bytes)); 1694 ret = regcache_write(map, 1695 reg + regmap_get_offset(map, i), 1696 ival); 1697 if (ret) { 1698 dev_err(map->dev, 1699 "Error in caching of register: %x ret: %d\n", 1700 reg + regmap_get_offset(map, i), ret); 1701 return ret; 1702 } 1703 } 1704 if (map->cache_only) { 1705 map->cache_dirty = true; 1706 return 0; 1707 } 1708 } 1709 1710 range = _regmap_range_lookup(map, reg); 1711 if (range) { 1712 int val_num = val_len / map->format.val_bytes; 1713 int win_offset = (reg - range->range_min) % range->window_len; 1714 int win_residue = range->window_len - win_offset; 1715 1716 /* If the write goes beyond the end of the window split it */ 1717 while (val_num > win_residue) { 1718 dev_dbg(map->dev, "Writing window %d/%zu\n", 1719 win_residue, val_len / map->format.val_bytes); 1720 ret = _regmap_raw_write_impl(map, reg, val, 1721 win_residue * 1722 map->format.val_bytes, noinc); 1723 if (ret != 0) 1724 return ret; 1725 1726 reg += win_residue; 1727 val_num -= win_residue; 1728 val += win_residue * map->format.val_bytes; 1729 val_len -= win_residue * map->format.val_bytes; 1730 1731 win_offset = (reg - range->range_min) % 1732 range->window_len; 1733 win_residue = range->window_len - win_offset; 1734 } 1735 1736 ret = _regmap_select_page(map, ®, range, noinc ? 1 : val_num); 1737 if (ret != 0) 1738 return ret; 1739 } 1740 1741 reg += map->reg_base; 1742 reg >>= map->format.reg_downshift; 1743 map->format.format_reg(map->work_buf, reg, map->reg_shift); 1744 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, 1745 map->write_flag_mask); 1746 1747 /* 1748 * Essentially all I/O mechanisms will be faster with a single 1749 * buffer to write. Since register syncs often generate raw 1750 * writes of single registers optimise that case. 1751 */ 1752 if (val != work_val && val_len == map->format.val_bytes) { 1753 memcpy(work_val, val, map->format.val_bytes); 1754 val = work_val; 1755 } 1756 1757 if (map->async && map->bus->async_write) { 1758 struct regmap_async *async; 1759 1760 trace_regmap_async_write_start(map, reg, val_len); 1761 1762 spin_lock_irqsave(&map->async_lock, flags); 1763 async = list_first_entry_or_null(&map->async_free, 1764 struct regmap_async, 1765 list); 1766 if (async) 1767 list_del(&async->list); 1768 spin_unlock_irqrestore(&map->async_lock, flags); 1769 1770 if (!async) { 1771 async = map->bus->async_alloc(); 1772 if (!async) 1773 return -ENOMEM; 1774 1775 async->work_buf = kzalloc(map->format.buf_size, 1776 GFP_KERNEL | GFP_DMA); 1777 if (!async->work_buf) { 1778 kfree(async); 1779 return -ENOMEM; 1780 } 1781 } 1782 1783 async->map = map; 1784 1785 /* If the caller supplied the value we can use it safely. */ 1786 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + 1787 map->format.reg_bytes + map->format.val_bytes); 1788 1789 spin_lock_irqsave(&map->async_lock, flags); 1790 list_add_tail(&async->list, &map->async_list); 1791 spin_unlock_irqrestore(&map->async_lock, flags); 1792 1793 if (val != work_val) 1794 ret = map->bus->async_write(map->bus_context, 1795 async->work_buf, 1796 map->format.reg_bytes + 1797 map->format.pad_bytes, 1798 val, val_len, async); 1799 else 1800 ret = map->bus->async_write(map->bus_context, 1801 async->work_buf, 1802 map->format.reg_bytes + 1803 map->format.pad_bytes + 1804 val_len, NULL, 0, async); 1805 1806 if (ret != 0) { 1807 dev_err(map->dev, "Failed to schedule write: %d\n", 1808 ret); 1809 1810 spin_lock_irqsave(&map->async_lock, flags); 1811 list_move(&async->list, &map->async_free); 1812 spin_unlock_irqrestore(&map->async_lock, flags); 1813 } 1814 1815 return ret; 1816 } 1817 1818 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); 1819 1820 /* If we're doing a single register write we can probably just 1821 * send the work_buf directly, otherwise try to do a gather 1822 * write. 1823 */ 1824 if (val == work_val) 1825 ret = map->bus->write(map->bus_context, map->work_buf, 1826 map->format.reg_bytes + 1827 map->format.pad_bytes + 1828 val_len); 1829 else if (map->bus->gather_write) 1830 ret = map->bus->gather_write(map->bus_context, map->work_buf, 1831 map->format.reg_bytes + 1832 map->format.pad_bytes, 1833 val, val_len); 1834 else 1835 ret = -ENOTSUPP; 1836 1837 /* If that didn't work fall back on linearising by hand. */ 1838 if (ret == -ENOTSUPP) { 1839 len = map->format.reg_bytes + map->format.pad_bytes + val_len; 1840 buf = kzalloc(len, GFP_KERNEL); 1841 if (!buf) 1842 return -ENOMEM; 1843 1844 memcpy(buf, map->work_buf, map->format.reg_bytes); 1845 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, 1846 val, val_len); 1847 ret = map->bus->write(map->bus_context, buf, len); 1848 1849 kfree(buf); 1850 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { 1851 /* regcache_drop_region() takes lock that we already have, 1852 * thus call map->cache_ops->drop() directly 1853 */ 1854 if (map->cache_ops && map->cache_ops->drop) 1855 map->cache_ops->drop(map, reg, reg + 1); 1856 } 1857 1858 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); 1859 1860 return ret; 1861 } 1862 1863 /** 1864 * regmap_can_raw_write - Test if regmap_raw_write() is supported 1865 * 1866 * @map: Map to check. 1867 */ 1868 bool regmap_can_raw_write(struct regmap *map) 1869 { 1870 return map->bus && map->bus->write && map->format.format_val && 1871 map->format.format_reg; 1872 } 1873 EXPORT_SYMBOL_GPL(regmap_can_raw_write); 1874 1875 /** 1876 * regmap_get_raw_read_max - Get the maximum size we can read 1877 * 1878 * @map: Map to check. 1879 */ 1880 size_t regmap_get_raw_read_max(struct regmap *map) 1881 { 1882 return map->max_raw_read; 1883 } 1884 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max); 1885 1886 /** 1887 * regmap_get_raw_write_max - Get the maximum size we can read 1888 * 1889 * @map: Map to check. 1890 */ 1891 size_t regmap_get_raw_write_max(struct regmap *map) 1892 { 1893 return map->max_raw_write; 1894 } 1895 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max); 1896 1897 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 1898 unsigned int val) 1899 { 1900 int ret; 1901 struct regmap_range_node *range; 1902 struct regmap *map = context; 1903 1904 WARN_ON(!map->bus || !map->format.format_write); 1905 1906 range = _regmap_range_lookup(map, reg); 1907 if (range) { 1908 ret = _regmap_select_page(map, ®, range, 1); 1909 if (ret != 0) 1910 return ret; 1911 } 1912 1913 reg += map->reg_base; 1914 reg >>= map->format.reg_downshift; 1915 map->format.format_write(map, reg, val); 1916 1917 trace_regmap_hw_write_start(map, reg, 1); 1918 1919 ret = map->bus->write(map->bus_context, map->work_buf, 1920 map->format.buf_size); 1921 1922 trace_regmap_hw_write_done(map, reg, 1); 1923 1924 return ret; 1925 } 1926 1927 static int _regmap_bus_reg_write(void *context, unsigned int reg, 1928 unsigned int val) 1929 { 1930 struct regmap *map = context; 1931 1932 return map->bus->reg_write(map->bus_context, reg, val); 1933 } 1934 1935 static int _regmap_bus_raw_write(void *context, unsigned int reg, 1936 unsigned int val) 1937 { 1938 struct regmap *map = context; 1939 1940 WARN_ON(!map->bus || !map->format.format_val); 1941 1942 map->format.format_val(map->work_buf + map->format.reg_bytes 1943 + map->format.pad_bytes, val, 0); 1944 return _regmap_raw_write_impl(map, reg, 1945 map->work_buf + 1946 map->format.reg_bytes + 1947 map->format.pad_bytes, 1948 map->format.val_bytes, 1949 false); 1950 } 1951 1952 static inline void *_regmap_map_get_context(struct regmap *map) 1953 { 1954 return (map->bus) ? map : map->bus_context; 1955 } 1956 1957 int _regmap_write(struct regmap *map, unsigned int reg, 1958 unsigned int val) 1959 { 1960 int ret; 1961 void *context = _regmap_map_get_context(map); 1962 1963 if (!regmap_writeable(map, reg)) 1964 return -EIO; 1965 1966 if (!map->cache_bypass && !map->defer_caching) { 1967 ret = regcache_write(map, reg, val); 1968 if (ret != 0) 1969 return ret; 1970 if (map->cache_only) { 1971 map->cache_dirty = true; 1972 return 0; 1973 } 1974 } 1975 1976 ret = map->reg_write(context, reg, val); 1977 if (ret == 0) { 1978 if (regmap_should_log(map)) 1979 dev_info(map->dev, "%x <= %x\n", reg, val); 1980 1981 trace_regmap_reg_write(map, reg, val); 1982 } 1983 1984 return ret; 1985 } 1986 1987 /** 1988 * regmap_write() - Write a value to a single register 1989 * 1990 * @map: Register map to write to 1991 * @reg: Register to write to 1992 * @val: Value to be written 1993 * 1994 * A value of zero will be returned on success, a negative errno will 1995 * be returned in error cases. 1996 */ 1997 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) 1998 { 1999 int ret; 2000 2001 if (!IS_ALIGNED(reg, map->reg_stride)) 2002 return -EINVAL; 2003 2004 map->lock(map->lock_arg); 2005 2006 ret = _regmap_write(map, reg, val); 2007 2008 map->unlock(map->lock_arg); 2009 2010 return ret; 2011 } 2012 EXPORT_SYMBOL_GPL(regmap_write); 2013 2014 /** 2015 * regmap_write_async() - Write a value to a single register asynchronously 2016 * 2017 * @map: Register map to write to 2018 * @reg: Register to write to 2019 * @val: Value to be written 2020 * 2021 * A value of zero will be returned on success, a negative errno will 2022 * be returned in error cases. 2023 */ 2024 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) 2025 { 2026 int ret; 2027 2028 if (!IS_ALIGNED(reg, map->reg_stride)) 2029 return -EINVAL; 2030 2031 map->lock(map->lock_arg); 2032 2033 map->async = true; 2034 2035 ret = _regmap_write(map, reg, val); 2036 2037 map->async = false; 2038 2039 map->unlock(map->lock_arg); 2040 2041 return ret; 2042 } 2043 EXPORT_SYMBOL_GPL(regmap_write_async); 2044 2045 int _regmap_raw_write(struct regmap *map, unsigned int reg, 2046 const void *val, size_t val_len, bool noinc) 2047 { 2048 size_t val_bytes = map->format.val_bytes; 2049 size_t val_count = val_len / val_bytes; 2050 size_t chunk_count, chunk_bytes; 2051 size_t chunk_regs = val_count; 2052 int ret, i; 2053 2054 if (!val_count) 2055 return -EINVAL; 2056 2057 if (map->use_single_write) 2058 chunk_regs = 1; 2059 else if (map->max_raw_write && val_len > map->max_raw_write) 2060 chunk_regs = map->max_raw_write / val_bytes; 2061 2062 chunk_count = val_count / chunk_regs; 2063 chunk_bytes = chunk_regs * val_bytes; 2064 2065 /* Write as many bytes as possible with chunk_size */ 2066 for (i = 0; i < chunk_count; i++) { 2067 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc); 2068 if (ret) 2069 return ret; 2070 2071 reg += regmap_get_offset(map, chunk_regs); 2072 val += chunk_bytes; 2073 val_len -= chunk_bytes; 2074 } 2075 2076 /* Write remaining bytes */ 2077 if (val_len) 2078 ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc); 2079 2080 return ret; 2081 } 2082 2083 /** 2084 * regmap_raw_write() - Write raw values to one or more registers 2085 * 2086 * @map: Register map to write to 2087 * @reg: Initial register to write to 2088 * @val: Block of data to be written, laid out for direct transmission to the 2089 * device 2090 * @val_len: Length of data pointed to by val. 2091 * 2092 * This function is intended to be used for things like firmware 2093 * download where a large block of data needs to be transferred to the 2094 * device. No formatting will be done on the data provided. 2095 * 2096 * A value of zero will be returned on success, a negative errno will 2097 * be returned in error cases. 2098 */ 2099 int regmap_raw_write(struct regmap *map, unsigned int reg, 2100 const void *val, size_t val_len) 2101 { 2102 int ret; 2103 2104 if (!regmap_can_raw_write(map)) 2105 return -EINVAL; 2106 if (val_len % map->format.val_bytes) 2107 return -EINVAL; 2108 2109 map->lock(map->lock_arg); 2110 2111 ret = _regmap_raw_write(map, reg, val, val_len, false); 2112 2113 map->unlock(map->lock_arg); 2114 2115 return ret; 2116 } 2117 EXPORT_SYMBOL_GPL(regmap_raw_write); 2118 2119 /** 2120 * regmap_noinc_write(): Write data from a register without incrementing the 2121 * register number 2122 * 2123 * @map: Register map to write to 2124 * @reg: Register to write to 2125 * @val: Pointer to data buffer 2126 * @val_len: Length of output buffer in bytes. 2127 * 2128 * The regmap API usually assumes that bulk bus write operations will write a 2129 * range of registers. Some devices have certain registers for which a write 2130 * operation can write to an internal FIFO. 2131 * 2132 * The target register must be volatile but registers after it can be 2133 * completely unrelated cacheable registers. 2134 * 2135 * This will attempt multiple writes as required to write val_len bytes. 2136 * 2137 * A value of zero will be returned on success, a negative errno will be 2138 * returned in error cases. 2139 */ 2140 int regmap_noinc_write(struct regmap *map, unsigned int reg, 2141 const void *val, size_t val_len) 2142 { 2143 size_t write_len; 2144 int ret; 2145 2146 if (!map->bus) 2147 return -EINVAL; 2148 if (!map->bus->write) 2149 return -ENOTSUPP; 2150 if (val_len % map->format.val_bytes) 2151 return -EINVAL; 2152 if (!IS_ALIGNED(reg, map->reg_stride)) 2153 return -EINVAL; 2154 if (val_len == 0) 2155 return -EINVAL; 2156 2157 map->lock(map->lock_arg); 2158 2159 if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) { 2160 ret = -EINVAL; 2161 goto out_unlock; 2162 } 2163 2164 while (val_len) { 2165 if (map->max_raw_write && map->max_raw_write < val_len) 2166 write_len = map->max_raw_write; 2167 else 2168 write_len = val_len; 2169 ret = _regmap_raw_write(map, reg, val, write_len, true); 2170 if (ret) 2171 goto out_unlock; 2172 val = ((u8 *)val) + write_len; 2173 val_len -= write_len; 2174 } 2175 2176 out_unlock: 2177 map->unlock(map->lock_arg); 2178 return ret; 2179 } 2180 EXPORT_SYMBOL_GPL(regmap_noinc_write); 2181 2182 /** 2183 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a 2184 * register field. 2185 * 2186 * @field: Register field to write to 2187 * @mask: Bitmask to change 2188 * @val: Value to be written 2189 * @change: Boolean indicating if a write was done 2190 * @async: Boolean indicating asynchronously 2191 * @force: Boolean indicating use force update 2192 * 2193 * Perform a read/modify/write cycle on the register field with change, 2194 * async, force option. 2195 * 2196 * A value of zero will be returned on success, a negative errno will 2197 * be returned in error cases. 2198 */ 2199 int regmap_field_update_bits_base(struct regmap_field *field, 2200 unsigned int mask, unsigned int val, 2201 bool *change, bool async, bool force) 2202 { 2203 mask = (mask << field->shift) & field->mask; 2204 2205 return regmap_update_bits_base(field->regmap, field->reg, 2206 mask, val << field->shift, 2207 change, async, force); 2208 } 2209 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base); 2210 2211 /** 2212 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a 2213 * register field with port ID 2214 * 2215 * @field: Register field to write to 2216 * @id: port ID 2217 * @mask: Bitmask to change 2218 * @val: Value to be written 2219 * @change: Boolean indicating if a write was done 2220 * @async: Boolean indicating asynchronously 2221 * @force: Boolean indicating use force update 2222 * 2223 * A value of zero will be returned on success, a negative errno will 2224 * be returned in error cases. 2225 */ 2226 int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, 2227 unsigned int mask, unsigned int val, 2228 bool *change, bool async, bool force) 2229 { 2230 if (id >= field->id_size) 2231 return -EINVAL; 2232 2233 mask = (mask << field->shift) & field->mask; 2234 2235 return regmap_update_bits_base(field->regmap, 2236 field->reg + (field->id_offset * id), 2237 mask, val << field->shift, 2238 change, async, force); 2239 } 2240 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base); 2241 2242 /** 2243 * regmap_bulk_write() - Write multiple registers to the device 2244 * 2245 * @map: Register map to write to 2246 * @reg: First register to be write from 2247 * @val: Block of data to be written, in native register size for device 2248 * @val_count: Number of registers to write 2249 * 2250 * This function is intended to be used for writing a large block of 2251 * data to the device either in single transfer or multiple transfer. 2252 * 2253 * A value of zero will be returned on success, a negative errno will 2254 * be returned in error cases. 2255 */ 2256 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 2257 size_t val_count) 2258 { 2259 int ret = 0, i; 2260 size_t val_bytes = map->format.val_bytes; 2261 2262 if (!IS_ALIGNED(reg, map->reg_stride)) 2263 return -EINVAL; 2264 2265 /* 2266 * Some devices don't support bulk write, for them we have a series of 2267 * single write operations. 2268 */ 2269 if (!map->bus || !map->format.parse_inplace) { 2270 map->lock(map->lock_arg); 2271 for (i = 0; i < val_count; i++) { 2272 unsigned int ival; 2273 2274 switch (val_bytes) { 2275 case 1: 2276 ival = *(u8 *)(val + (i * val_bytes)); 2277 break; 2278 case 2: 2279 ival = *(u16 *)(val + (i * val_bytes)); 2280 break; 2281 case 4: 2282 ival = *(u32 *)(val + (i * val_bytes)); 2283 break; 2284 #ifdef CONFIG_64BIT 2285 case 8: 2286 ival = *(u64 *)(val + (i * val_bytes)); 2287 break; 2288 #endif 2289 default: 2290 ret = -EINVAL; 2291 goto out; 2292 } 2293 2294 ret = _regmap_write(map, 2295 reg + regmap_get_offset(map, i), 2296 ival); 2297 if (ret != 0) 2298 goto out; 2299 } 2300 out: 2301 map->unlock(map->lock_arg); 2302 } else { 2303 void *wval; 2304 2305 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); 2306 if (!wval) 2307 return -ENOMEM; 2308 2309 for (i = 0; i < val_count * val_bytes; i += val_bytes) 2310 map->format.parse_inplace(wval + i); 2311 2312 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count); 2313 2314 kfree(wval); 2315 } 2316 return ret; 2317 } 2318 EXPORT_SYMBOL_GPL(regmap_bulk_write); 2319 2320 /* 2321 * _regmap_raw_multi_reg_write() 2322 * 2323 * the (register,newvalue) pairs in regs have not been formatted, but 2324 * they are all in the same page and have been changed to being page 2325 * relative. The page register has been written if that was necessary. 2326 */ 2327 static int _regmap_raw_multi_reg_write(struct regmap *map, 2328 const struct reg_sequence *regs, 2329 size_t num_regs) 2330 { 2331 int ret; 2332 void *buf; 2333 int i; 2334 u8 *u8; 2335 size_t val_bytes = map->format.val_bytes; 2336 size_t reg_bytes = map->format.reg_bytes; 2337 size_t pad_bytes = map->format.pad_bytes; 2338 size_t pair_size = reg_bytes + pad_bytes + val_bytes; 2339 size_t len = pair_size * num_regs; 2340 2341 if (!len) 2342 return -EINVAL; 2343 2344 buf = kzalloc(len, GFP_KERNEL); 2345 if (!buf) 2346 return -ENOMEM; 2347 2348 /* We have to linearise by hand. */ 2349 2350 u8 = buf; 2351 2352 for (i = 0; i < num_regs; i++) { 2353 unsigned int reg = regs[i].reg; 2354 unsigned int val = regs[i].def; 2355 trace_regmap_hw_write_start(map, reg, 1); 2356 reg += map->reg_base; 2357 reg >>= map->format.reg_downshift; 2358 map->format.format_reg(u8, reg, map->reg_shift); 2359 u8 += reg_bytes + pad_bytes; 2360 map->format.format_val(u8, val, 0); 2361 u8 += val_bytes; 2362 } 2363 u8 = buf; 2364 *u8 |= map->write_flag_mask; 2365 2366 ret = map->bus->write(map->bus_context, buf, len); 2367 2368 kfree(buf); 2369 2370 for (i = 0; i < num_regs; i++) { 2371 int reg = regs[i].reg; 2372 trace_regmap_hw_write_done(map, reg, 1); 2373 } 2374 return ret; 2375 } 2376 2377 static unsigned int _regmap_register_page(struct regmap *map, 2378 unsigned int reg, 2379 struct regmap_range_node *range) 2380 { 2381 unsigned int win_page = (reg - range->range_min) / range->window_len; 2382 2383 return win_page; 2384 } 2385 2386 static int _regmap_range_multi_paged_reg_write(struct regmap *map, 2387 struct reg_sequence *regs, 2388 size_t num_regs) 2389 { 2390 int ret; 2391 int i, n; 2392 struct reg_sequence *base; 2393 unsigned int this_page = 0; 2394 unsigned int page_change = 0; 2395 /* 2396 * the set of registers are not neccessarily in order, but 2397 * since the order of write must be preserved this algorithm 2398 * chops the set each time the page changes. This also applies 2399 * if there is a delay required at any point in the sequence. 2400 */ 2401 base = regs; 2402 for (i = 0, n = 0; i < num_regs; i++, n++) { 2403 unsigned int reg = regs[i].reg; 2404 struct regmap_range_node *range; 2405 2406 range = _regmap_range_lookup(map, reg); 2407 if (range) { 2408 unsigned int win_page = _regmap_register_page(map, reg, 2409 range); 2410 2411 if (i == 0) 2412 this_page = win_page; 2413 if (win_page != this_page) { 2414 this_page = win_page; 2415 page_change = 1; 2416 } 2417 } 2418 2419 /* If we have both a page change and a delay make sure to 2420 * write the regs and apply the delay before we change the 2421 * page. 2422 */ 2423 2424 if (page_change || regs[i].delay_us) { 2425 2426 /* For situations where the first write requires 2427 * a delay we need to make sure we don't call 2428 * raw_multi_reg_write with n=0 2429 * This can't occur with page breaks as we 2430 * never write on the first iteration 2431 */ 2432 if (regs[i].delay_us && i == 0) 2433 n = 1; 2434 2435 ret = _regmap_raw_multi_reg_write(map, base, n); 2436 if (ret != 0) 2437 return ret; 2438 2439 if (regs[i].delay_us) { 2440 if (map->can_sleep) 2441 fsleep(regs[i].delay_us); 2442 else 2443 udelay(regs[i].delay_us); 2444 } 2445 2446 base += n; 2447 n = 0; 2448 2449 if (page_change) { 2450 ret = _regmap_select_page(map, 2451 &base[n].reg, 2452 range, 1); 2453 if (ret != 0) 2454 return ret; 2455 2456 page_change = 0; 2457 } 2458 2459 } 2460 2461 } 2462 if (n > 0) 2463 return _regmap_raw_multi_reg_write(map, base, n); 2464 return 0; 2465 } 2466 2467 static int _regmap_multi_reg_write(struct regmap *map, 2468 const struct reg_sequence *regs, 2469 size_t num_regs) 2470 { 2471 int i; 2472 int ret; 2473 2474 if (!map->can_multi_write) { 2475 for (i = 0; i < num_regs; i++) { 2476 ret = _regmap_write(map, regs[i].reg, regs[i].def); 2477 if (ret != 0) 2478 return ret; 2479 2480 if (regs[i].delay_us) { 2481 if (map->can_sleep) 2482 fsleep(regs[i].delay_us); 2483 else 2484 udelay(regs[i].delay_us); 2485 } 2486 } 2487 return 0; 2488 } 2489 2490 if (!map->format.parse_inplace) 2491 return -EINVAL; 2492 2493 if (map->writeable_reg) 2494 for (i = 0; i < num_regs; i++) { 2495 int reg = regs[i].reg; 2496 if (!map->writeable_reg(map->dev, reg)) 2497 return -EINVAL; 2498 if (!IS_ALIGNED(reg, map->reg_stride)) 2499 return -EINVAL; 2500 } 2501 2502 if (!map->cache_bypass) { 2503 for (i = 0; i < num_regs; i++) { 2504 unsigned int val = regs[i].def; 2505 unsigned int reg = regs[i].reg; 2506 ret = regcache_write(map, reg, val); 2507 if (ret) { 2508 dev_err(map->dev, 2509 "Error in caching of register: %x ret: %d\n", 2510 reg, ret); 2511 return ret; 2512 } 2513 } 2514 if (map->cache_only) { 2515 map->cache_dirty = true; 2516 return 0; 2517 } 2518 } 2519 2520 WARN_ON(!map->bus); 2521 2522 for (i = 0; i < num_regs; i++) { 2523 unsigned int reg = regs[i].reg; 2524 struct regmap_range_node *range; 2525 2526 /* Coalesce all the writes between a page break or a delay 2527 * in a sequence 2528 */ 2529 range = _regmap_range_lookup(map, reg); 2530 if (range || regs[i].delay_us) { 2531 size_t len = sizeof(struct reg_sequence)*num_regs; 2532 struct reg_sequence *base = kmemdup(regs, len, 2533 GFP_KERNEL); 2534 if (!base) 2535 return -ENOMEM; 2536 ret = _regmap_range_multi_paged_reg_write(map, base, 2537 num_regs); 2538 kfree(base); 2539 2540 return ret; 2541 } 2542 } 2543 return _regmap_raw_multi_reg_write(map, regs, num_regs); 2544 } 2545 2546 /** 2547 * regmap_multi_reg_write() - Write multiple registers to the device 2548 * 2549 * @map: Register map to write to 2550 * @regs: Array of structures containing register,value to be written 2551 * @num_regs: Number of registers to write 2552 * 2553 * Write multiple registers to the device where the set of register, value 2554 * pairs are supplied in any order, possibly not all in a single range. 2555 * 2556 * The 'normal' block write mode will send ultimately send data on the 2557 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are 2558 * addressed. However, this alternative block multi write mode will send 2559 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device 2560 * must of course support the mode. 2561 * 2562 * A value of zero will be returned on success, a negative errno will be 2563 * returned in error cases. 2564 */ 2565 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, 2566 int num_regs) 2567 { 2568 int ret; 2569 2570 map->lock(map->lock_arg); 2571 2572 ret = _regmap_multi_reg_write(map, regs, num_regs); 2573 2574 map->unlock(map->lock_arg); 2575 2576 return ret; 2577 } 2578 EXPORT_SYMBOL_GPL(regmap_multi_reg_write); 2579 2580 /** 2581 * regmap_multi_reg_write_bypassed() - Write multiple registers to the 2582 * device but not the cache 2583 * 2584 * @map: Register map to write to 2585 * @regs: Array of structures containing register,value to be written 2586 * @num_regs: Number of registers to write 2587 * 2588 * Write multiple registers to the device but not the cache where the set 2589 * of register are supplied in any order. 2590 * 2591 * This function is intended to be used for writing a large block of data 2592 * atomically to the device in single transfer for those I2C client devices 2593 * that implement this alternative block write mode. 2594 * 2595 * A value of zero will be returned on success, a negative errno will 2596 * be returned in error cases. 2597 */ 2598 int regmap_multi_reg_write_bypassed(struct regmap *map, 2599 const struct reg_sequence *regs, 2600 int num_regs) 2601 { 2602 int ret; 2603 bool bypass; 2604 2605 map->lock(map->lock_arg); 2606 2607 bypass = map->cache_bypass; 2608 map->cache_bypass = true; 2609 2610 ret = _regmap_multi_reg_write(map, regs, num_regs); 2611 2612 map->cache_bypass = bypass; 2613 2614 map->unlock(map->lock_arg); 2615 2616 return ret; 2617 } 2618 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); 2619 2620 /** 2621 * regmap_raw_write_async() - Write raw values to one or more registers 2622 * asynchronously 2623 * 2624 * @map: Register map to write to 2625 * @reg: Initial register to write to 2626 * @val: Block of data to be written, laid out for direct transmission to the 2627 * device. Must be valid until regmap_async_complete() is called. 2628 * @val_len: Length of data pointed to by val. 2629 * 2630 * This function is intended to be used for things like firmware 2631 * download where a large block of data needs to be transferred to the 2632 * device. No formatting will be done on the data provided. 2633 * 2634 * If supported by the underlying bus the write will be scheduled 2635 * asynchronously, helping maximise I/O speed on higher speed buses 2636 * like SPI. regmap_async_complete() can be called to ensure that all 2637 * asynchrnous writes have been completed. 2638 * 2639 * A value of zero will be returned on success, a negative errno will 2640 * be returned in error cases. 2641 */ 2642 int regmap_raw_write_async(struct regmap *map, unsigned int reg, 2643 const void *val, size_t val_len) 2644 { 2645 int ret; 2646 2647 if (val_len % map->format.val_bytes) 2648 return -EINVAL; 2649 if (!IS_ALIGNED(reg, map->reg_stride)) 2650 return -EINVAL; 2651 2652 map->lock(map->lock_arg); 2653 2654 map->async = true; 2655 2656 ret = _regmap_raw_write(map, reg, val, val_len, false); 2657 2658 map->async = false; 2659 2660 map->unlock(map->lock_arg); 2661 2662 return ret; 2663 } 2664 EXPORT_SYMBOL_GPL(regmap_raw_write_async); 2665 2666 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2667 unsigned int val_len, bool noinc) 2668 { 2669 struct regmap_range_node *range; 2670 int ret; 2671 2672 WARN_ON(!map->bus); 2673 2674 if (!map->bus || !map->bus->read) 2675 return -EINVAL; 2676 2677 range = _regmap_range_lookup(map, reg); 2678 if (range) { 2679 ret = _regmap_select_page(map, ®, range, 2680 noinc ? 1 : val_len / map->format.val_bytes); 2681 if (ret != 0) 2682 return ret; 2683 } 2684 2685 reg += map->reg_base; 2686 reg >>= map->format.reg_downshift; 2687 map->format.format_reg(map->work_buf, reg, map->reg_shift); 2688 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, 2689 map->read_flag_mask); 2690 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); 2691 2692 ret = map->bus->read(map->bus_context, map->work_buf, 2693 map->format.reg_bytes + map->format.pad_bytes, 2694 val, val_len); 2695 2696 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); 2697 2698 return ret; 2699 } 2700 2701 static int _regmap_bus_reg_read(void *context, unsigned int reg, 2702 unsigned int *val) 2703 { 2704 struct regmap *map = context; 2705 2706 return map->bus->reg_read(map->bus_context, reg, val); 2707 } 2708 2709 static int _regmap_bus_read(void *context, unsigned int reg, 2710 unsigned int *val) 2711 { 2712 int ret; 2713 struct regmap *map = context; 2714 void *work_val = map->work_buf + map->format.reg_bytes + 2715 map->format.pad_bytes; 2716 2717 if (!map->format.parse_val) 2718 return -EINVAL; 2719 2720 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); 2721 if (ret == 0) 2722 *val = map->format.parse_val(work_val); 2723 2724 return ret; 2725 } 2726 2727 static int _regmap_read(struct regmap *map, unsigned int reg, 2728 unsigned int *val) 2729 { 2730 int ret; 2731 void *context = _regmap_map_get_context(map); 2732 2733 if (!map->cache_bypass) { 2734 ret = regcache_read(map, reg, val); 2735 if (ret == 0) 2736 return 0; 2737 } 2738 2739 if (map->cache_only) 2740 return -EBUSY; 2741 2742 if (!regmap_readable(map, reg)) 2743 return -EIO; 2744 2745 ret = map->reg_read(context, reg, val); 2746 if (ret == 0) { 2747 if (regmap_should_log(map)) 2748 dev_info(map->dev, "%x => %x\n", reg, *val); 2749 2750 trace_regmap_reg_read(map, reg, *val); 2751 2752 if (!map->cache_bypass) 2753 regcache_write(map, reg, *val); 2754 } 2755 2756 return ret; 2757 } 2758 2759 /** 2760 * regmap_read() - Read a value from a single register 2761 * 2762 * @map: Register map to read from 2763 * @reg: Register to be read from 2764 * @val: Pointer to store read value 2765 * 2766 * A value of zero will be returned on success, a negative errno will 2767 * be returned in error cases. 2768 */ 2769 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) 2770 { 2771 int ret; 2772 2773 if (!IS_ALIGNED(reg, map->reg_stride)) 2774 return -EINVAL; 2775 2776 map->lock(map->lock_arg); 2777 2778 ret = _regmap_read(map, reg, val); 2779 2780 map->unlock(map->lock_arg); 2781 2782 return ret; 2783 } 2784 EXPORT_SYMBOL_GPL(regmap_read); 2785 2786 /** 2787 * regmap_raw_read() - Read raw data from the device 2788 * 2789 * @map: Register map to read from 2790 * @reg: First register to be read from 2791 * @val: Pointer to store read value 2792 * @val_len: Size of data to read 2793 * 2794 * A value of zero will be returned on success, a negative errno will 2795 * be returned in error cases. 2796 */ 2797 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2798 size_t val_len) 2799 { 2800 size_t val_bytes = map->format.val_bytes; 2801 size_t val_count = val_len / val_bytes; 2802 unsigned int v; 2803 int ret, i; 2804 2805 if (!map->bus) 2806 return -EINVAL; 2807 if (val_len % map->format.val_bytes) 2808 return -EINVAL; 2809 if (!IS_ALIGNED(reg, map->reg_stride)) 2810 return -EINVAL; 2811 if (val_count == 0) 2812 return -EINVAL; 2813 2814 map->lock(map->lock_arg); 2815 2816 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 2817 map->cache_type == REGCACHE_NONE) { 2818 size_t chunk_count, chunk_bytes; 2819 size_t chunk_regs = val_count; 2820 2821 if (!map->bus->read) { 2822 ret = -ENOTSUPP; 2823 goto out; 2824 } 2825 2826 if (map->use_single_read) 2827 chunk_regs = 1; 2828 else if (map->max_raw_read && val_len > map->max_raw_read) 2829 chunk_regs = map->max_raw_read / val_bytes; 2830 2831 chunk_count = val_count / chunk_regs; 2832 chunk_bytes = chunk_regs * val_bytes; 2833 2834 /* Read bytes that fit into whole chunks */ 2835 for (i = 0; i < chunk_count; i++) { 2836 ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); 2837 if (ret != 0) 2838 goto out; 2839 2840 reg += regmap_get_offset(map, chunk_regs); 2841 val += chunk_bytes; 2842 val_len -= chunk_bytes; 2843 } 2844 2845 /* Read remaining bytes */ 2846 if (val_len) { 2847 ret = _regmap_raw_read(map, reg, val, val_len, false); 2848 if (ret != 0) 2849 goto out; 2850 } 2851 } else { 2852 /* Otherwise go word by word for the cache; should be low 2853 * cost as we expect to hit the cache. 2854 */ 2855 for (i = 0; i < val_count; i++) { 2856 ret = _regmap_read(map, reg + regmap_get_offset(map, i), 2857 &v); 2858 if (ret != 0) 2859 goto out; 2860 2861 map->format.format_val(val + (i * val_bytes), v, 0); 2862 } 2863 } 2864 2865 out: 2866 map->unlock(map->lock_arg); 2867 2868 return ret; 2869 } 2870 EXPORT_SYMBOL_GPL(regmap_raw_read); 2871 2872 /** 2873 * regmap_noinc_read(): Read data from a register without incrementing the 2874 * register number 2875 * 2876 * @map: Register map to read from 2877 * @reg: Register to read from 2878 * @val: Pointer to data buffer 2879 * @val_len: Length of output buffer in bytes. 2880 * 2881 * The regmap API usually assumes that bulk bus read operations will read a 2882 * range of registers. Some devices have certain registers for which a read 2883 * operation read will read from an internal FIFO. 2884 * 2885 * The target register must be volatile but registers after it can be 2886 * completely unrelated cacheable registers. 2887 * 2888 * This will attempt multiple reads as required to read val_len bytes. 2889 * 2890 * A value of zero will be returned on success, a negative errno will be 2891 * returned in error cases. 2892 */ 2893 int regmap_noinc_read(struct regmap *map, unsigned int reg, 2894 void *val, size_t val_len) 2895 { 2896 size_t read_len; 2897 int ret; 2898 2899 if (!map->bus) 2900 return -EINVAL; 2901 if (!map->bus->read) 2902 return -ENOTSUPP; 2903 if (val_len % map->format.val_bytes) 2904 return -EINVAL; 2905 if (!IS_ALIGNED(reg, map->reg_stride)) 2906 return -EINVAL; 2907 if (val_len == 0) 2908 return -EINVAL; 2909 2910 map->lock(map->lock_arg); 2911 2912 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) { 2913 ret = -EINVAL; 2914 goto out_unlock; 2915 } 2916 2917 while (val_len) { 2918 if (map->max_raw_read && map->max_raw_read < val_len) 2919 read_len = map->max_raw_read; 2920 else 2921 read_len = val_len; 2922 ret = _regmap_raw_read(map, reg, val, read_len, true); 2923 if (ret) 2924 goto out_unlock; 2925 val = ((u8 *)val) + read_len; 2926 val_len -= read_len; 2927 } 2928 2929 out_unlock: 2930 map->unlock(map->lock_arg); 2931 return ret; 2932 } 2933 EXPORT_SYMBOL_GPL(regmap_noinc_read); 2934 2935 /** 2936 * regmap_field_read(): Read a value to a single register field 2937 * 2938 * @field: Register field to read from 2939 * @val: Pointer to store read value 2940 * 2941 * A value of zero will be returned on success, a negative errno will 2942 * be returned in error cases. 2943 */ 2944 int regmap_field_read(struct regmap_field *field, unsigned int *val) 2945 { 2946 int ret; 2947 unsigned int reg_val; 2948 ret = regmap_read(field->regmap, field->reg, ®_val); 2949 if (ret != 0) 2950 return ret; 2951 2952 reg_val &= field->mask; 2953 reg_val >>= field->shift; 2954 *val = reg_val; 2955 2956 return ret; 2957 } 2958 EXPORT_SYMBOL_GPL(regmap_field_read); 2959 2960 /** 2961 * regmap_fields_read() - Read a value to a single register field with port ID 2962 * 2963 * @field: Register field to read from 2964 * @id: port ID 2965 * @val: Pointer to store read value 2966 * 2967 * A value of zero will be returned on success, a negative errno will 2968 * be returned in error cases. 2969 */ 2970 int regmap_fields_read(struct regmap_field *field, unsigned int id, 2971 unsigned int *val) 2972 { 2973 int ret; 2974 unsigned int reg_val; 2975 2976 if (id >= field->id_size) 2977 return -EINVAL; 2978 2979 ret = regmap_read(field->regmap, 2980 field->reg + (field->id_offset * id), 2981 ®_val); 2982 if (ret != 0) 2983 return ret; 2984 2985 reg_val &= field->mask; 2986 reg_val >>= field->shift; 2987 *val = reg_val; 2988 2989 return ret; 2990 } 2991 EXPORT_SYMBOL_GPL(regmap_fields_read); 2992 2993 /** 2994 * regmap_bulk_read() - Read multiple registers from the device 2995 * 2996 * @map: Register map to read from 2997 * @reg: First register to be read from 2998 * @val: Pointer to store read value, in native register size for device 2999 * @val_count: Number of registers to read 3000 * 3001 * A value of zero will be returned on success, a negative errno will 3002 * be returned in error cases. 3003 */ 3004 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 3005 size_t val_count) 3006 { 3007 int ret, i; 3008 size_t val_bytes = map->format.val_bytes; 3009 bool vol = regmap_volatile_range(map, reg, val_count); 3010 3011 if (!IS_ALIGNED(reg, map->reg_stride)) 3012 return -EINVAL; 3013 if (val_count == 0) 3014 return -EINVAL; 3015 3016 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { 3017 ret = regmap_raw_read(map, reg, val, val_bytes * val_count); 3018 if (ret != 0) 3019 return ret; 3020 3021 for (i = 0; i < val_count * val_bytes; i += val_bytes) 3022 map->format.parse_inplace(val + i); 3023 } else { 3024 #ifdef CONFIG_64BIT 3025 u64 *u64 = val; 3026 #endif 3027 u32 *u32 = val; 3028 u16 *u16 = val; 3029 u8 *u8 = val; 3030 3031 map->lock(map->lock_arg); 3032 3033 for (i = 0; i < val_count; i++) { 3034 unsigned int ival; 3035 3036 ret = _regmap_read(map, reg + regmap_get_offset(map, i), 3037 &ival); 3038 if (ret != 0) 3039 goto out; 3040 3041 switch (map->format.val_bytes) { 3042 #ifdef CONFIG_64BIT 3043 case 8: 3044 u64[i] = ival; 3045 break; 3046 #endif 3047 case 4: 3048 u32[i] = ival; 3049 break; 3050 case 2: 3051 u16[i] = ival; 3052 break; 3053 case 1: 3054 u8[i] = ival; 3055 break; 3056 default: 3057 ret = -EINVAL; 3058 goto out; 3059 } 3060 } 3061 3062 out: 3063 map->unlock(map->lock_arg); 3064 } 3065 3066 return ret; 3067 } 3068 EXPORT_SYMBOL_GPL(regmap_bulk_read); 3069 3070 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 3071 unsigned int mask, unsigned int val, 3072 bool *change, bool force_write) 3073 { 3074 int ret; 3075 unsigned int tmp, orig; 3076 3077 if (change) 3078 *change = false; 3079 3080 if (regmap_volatile(map, reg) && map->reg_update_bits) { 3081 ret = map->reg_update_bits(map->bus_context, reg, mask, val); 3082 if (ret == 0 && change) 3083 *change = true; 3084 } else { 3085 ret = _regmap_read(map, reg, &orig); 3086 if (ret != 0) 3087 return ret; 3088 3089 tmp = orig & ~mask; 3090 tmp |= val & mask; 3091 3092 if (force_write || (tmp != orig)) { 3093 ret = _regmap_write(map, reg, tmp); 3094 if (ret == 0 && change) 3095 *change = true; 3096 } 3097 } 3098 3099 return ret; 3100 } 3101 3102 /** 3103 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register 3104 * 3105 * @map: Register map to update 3106 * @reg: Register to update 3107 * @mask: Bitmask to change 3108 * @val: New value for bitmask 3109 * @change: Boolean indicating if a write was done 3110 * @async: Boolean indicating asynchronously 3111 * @force: Boolean indicating use force update 3112 * 3113 * Perform a read/modify/write cycle on a register map with change, async, force 3114 * options. 3115 * 3116 * If async is true: 3117 * 3118 * With most buses the read must be done synchronously so this is most useful 3119 * for devices with a cache which do not need to interact with the hardware to 3120 * determine the current register value. 3121 * 3122 * Returns zero for success, a negative number on error. 3123 */ 3124 int regmap_update_bits_base(struct regmap *map, unsigned int reg, 3125 unsigned int mask, unsigned int val, 3126 bool *change, bool async, bool force) 3127 { 3128 int ret; 3129 3130 map->lock(map->lock_arg); 3131 3132 map->async = async; 3133 3134 ret = _regmap_update_bits(map, reg, mask, val, change, force); 3135 3136 map->async = false; 3137 3138 map->unlock(map->lock_arg); 3139 3140 return ret; 3141 } 3142 EXPORT_SYMBOL_GPL(regmap_update_bits_base); 3143 3144 /** 3145 * regmap_test_bits() - Check if all specified bits are set in a register. 3146 * 3147 * @map: Register map to operate on 3148 * @reg: Register to read from 3149 * @bits: Bits to test 3150 * 3151 * Returns 0 if at least one of the tested bits is not set, 1 if all tested 3152 * bits are set and a negative error number if the underlying regmap_read() 3153 * fails. 3154 */ 3155 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) 3156 { 3157 unsigned int val, ret; 3158 3159 ret = regmap_read(map, reg, &val); 3160 if (ret) 3161 return ret; 3162 3163 return (val & bits) == bits; 3164 } 3165 EXPORT_SYMBOL_GPL(regmap_test_bits); 3166 3167 void regmap_async_complete_cb(struct regmap_async *async, int ret) 3168 { 3169 struct regmap *map = async->map; 3170 bool wake; 3171 3172 trace_regmap_async_io_complete(map); 3173 3174 spin_lock(&map->async_lock); 3175 list_move(&async->list, &map->async_free); 3176 wake = list_empty(&map->async_list); 3177 3178 if (ret != 0) 3179 map->async_ret = ret; 3180 3181 spin_unlock(&map->async_lock); 3182 3183 if (wake) 3184 wake_up(&map->async_waitq); 3185 } 3186 EXPORT_SYMBOL_GPL(regmap_async_complete_cb); 3187 3188 static int regmap_async_is_done(struct regmap *map) 3189 { 3190 unsigned long flags; 3191 int ret; 3192 3193 spin_lock_irqsave(&map->async_lock, flags); 3194 ret = list_empty(&map->async_list); 3195 spin_unlock_irqrestore(&map->async_lock, flags); 3196 3197 return ret; 3198 } 3199 3200 /** 3201 * regmap_async_complete - Ensure all asynchronous I/O has completed. 3202 * 3203 * @map: Map to operate on. 3204 * 3205 * Blocks until any pending asynchronous I/O has completed. Returns 3206 * an error code for any failed I/O operations. 3207 */ 3208 int regmap_async_complete(struct regmap *map) 3209 { 3210 unsigned long flags; 3211 int ret; 3212 3213 /* Nothing to do with no async support */ 3214 if (!map->bus || !map->bus->async_write) 3215 return 0; 3216 3217 trace_regmap_async_complete_start(map); 3218 3219 wait_event(map->async_waitq, regmap_async_is_done(map)); 3220 3221 spin_lock_irqsave(&map->async_lock, flags); 3222 ret = map->async_ret; 3223 map->async_ret = 0; 3224 spin_unlock_irqrestore(&map->async_lock, flags); 3225 3226 trace_regmap_async_complete_done(map); 3227 3228 return ret; 3229 } 3230 EXPORT_SYMBOL_GPL(regmap_async_complete); 3231 3232 /** 3233 * regmap_register_patch - Register and apply register updates to be applied 3234 * on device initialistion 3235 * 3236 * @map: Register map to apply updates to. 3237 * @regs: Values to update. 3238 * @num_regs: Number of entries in regs. 3239 * 3240 * Register a set of register updates to be applied to the device 3241 * whenever the device registers are synchronised with the cache and 3242 * apply them immediately. Typically this is used to apply 3243 * corrections to be applied to the device defaults on startup, such 3244 * as the updates some vendors provide to undocumented registers. 3245 * 3246 * The caller must ensure that this function cannot be called 3247 * concurrently with either itself or regcache_sync(). 3248 */ 3249 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, 3250 int num_regs) 3251 { 3252 struct reg_sequence *p; 3253 int ret; 3254 bool bypass; 3255 3256 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", 3257 num_regs)) 3258 return 0; 3259 3260 p = krealloc(map->patch, 3261 sizeof(struct reg_sequence) * (map->patch_regs + num_regs), 3262 GFP_KERNEL); 3263 if (p) { 3264 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); 3265 map->patch = p; 3266 map->patch_regs += num_regs; 3267 } else { 3268 return -ENOMEM; 3269 } 3270 3271 map->lock(map->lock_arg); 3272 3273 bypass = map->cache_bypass; 3274 3275 map->cache_bypass = true; 3276 map->async = true; 3277 3278 ret = _regmap_multi_reg_write(map, regs, num_regs); 3279 3280 map->async = false; 3281 map->cache_bypass = bypass; 3282 3283 map->unlock(map->lock_arg); 3284 3285 regmap_async_complete(map); 3286 3287 return ret; 3288 } 3289 EXPORT_SYMBOL_GPL(regmap_register_patch); 3290 3291 /** 3292 * regmap_get_val_bytes() - Report the size of a register value 3293 * 3294 * @map: Register map to operate on. 3295 * 3296 * Report the size of a register value, mainly intended to for use by 3297 * generic infrastructure built on top of regmap. 3298 */ 3299 int regmap_get_val_bytes(struct regmap *map) 3300 { 3301 if (map->format.format_write) 3302 return -EINVAL; 3303 3304 return map->format.val_bytes; 3305 } 3306 EXPORT_SYMBOL_GPL(regmap_get_val_bytes); 3307 3308 /** 3309 * regmap_get_max_register() - Report the max register value 3310 * 3311 * @map: Register map to operate on. 3312 * 3313 * Report the max register value, mainly intended to for use by 3314 * generic infrastructure built on top of regmap. 3315 */ 3316 int regmap_get_max_register(struct regmap *map) 3317 { 3318 return map->max_register ? map->max_register : -EINVAL; 3319 } 3320 EXPORT_SYMBOL_GPL(regmap_get_max_register); 3321 3322 /** 3323 * regmap_get_reg_stride() - Report the register address stride 3324 * 3325 * @map: Register map to operate on. 3326 * 3327 * Report the register address stride, mainly intended to for use by 3328 * generic infrastructure built on top of regmap. 3329 */ 3330 int regmap_get_reg_stride(struct regmap *map) 3331 { 3332 return map->reg_stride; 3333 } 3334 EXPORT_SYMBOL_GPL(regmap_get_reg_stride); 3335 3336 int regmap_parse_val(struct regmap *map, const void *buf, 3337 unsigned int *val) 3338 { 3339 if (!map->format.parse_val) 3340 return -EINVAL; 3341 3342 *val = map->format.parse_val(buf); 3343 3344 return 0; 3345 } 3346 EXPORT_SYMBOL_GPL(regmap_parse_val); 3347 3348 static int __init regmap_initcall(void) 3349 { 3350 regmap_debugfs_initcall(); 3351 3352 return 0; 3353 } 3354 postcore_initcall(regmap_initcall); 3355