// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

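/*
 * Example (editor's illustrative sketch, not part of this file): a driver
 * typically drives regmap_writeable() and friends via an access table in
 * its regmap_config. All "foo" names below are hypothetical.
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),	// control block
 *		regmap_reg_range(0x80, 0x8f),	// IRQ mask block
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x8f,
 *		.wr_table = &foo_wr_table,
 *	};
 */
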
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}


static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif

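/*
 * Example (editor's sketch): the format/parse helpers above are inverses of
 * each other for a given width and endianness. Formatting the 16-bit value
 * 0x1234 big-endian yields the wire bytes { 0x12, 0x34 }, and parsing those
 * bytes recovers 0x1234:
 *
 *	u8 wire[2];
 *
 *	regmap_format_16_be(wire, 0x1234, 0);	// wire[0] = 0x12, wire[1] = 0x34
 *	WARN_ON(regmap_parse_16_be(wire) != 0x1234);
 */
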
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

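/*
 * Example (editor's sketch): instead of the built-in mutex, spinlock or
 * hwspinlock implementations above, a driver that shares one physical bus
 * lock across several maps can route regmap through its own callbacks via
 * regmap_config. The "foo" names are hypothetical.
 *
 *	static void foo_regmap_lock(void *arg)
 *	{
 *		struct foo_chip *chip = arg;
 *
 *		mutex_lock(&chip->io_lock);
 *	}
 *
 *	static void foo_regmap_unlock(void *arg)
 *	{
 *		struct foo_chip *chip = arg;
 *
 *		mutex_unlock(&chip->io_lock);
 *	}
 *
 *	// In the regmap_config:
 *	//	.lock = foo_regmap_lock,
 *	//	.unlock = foo_regmap_unlock,
 *	//	.lock_arg = chip,
 */
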
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

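/*
 * Example (editor's sketch): a device whose values are little-endian on the
 * wire can either set "little-endian" in its firmware node or state it
 * directly in the config, which takes precedence over both the fwnode and
 * the bus default per the lookup order above:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
 *	};
 */
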
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL;	/* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write(), don't
	 * allocate scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
					    config->val_bits +
					    config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

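/*
 * Example (editor's sketch): REG_FIELD() describes the bits of interest and
 * devm_regmap_field_alloc() turns that into a managed regmap_field. The
 * register layout here is hypothetical.
 *
 *	// Bits [6:4] of register 0x23 hold a hypothetical clock divider.
 *	static const struct reg_field foo_clk_div = REG_FIELD(0x23, 4, 6);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_clk_div);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 */
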
/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be an -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free()
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be an -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc.
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free bulk register fields allocated using
 * devm_regmap_field_bulk_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 * regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

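/*
 * Example (editor's sketch): after probing the silicon revision a driver
 * might discover extra registers and rebuild the cache to match. The
 * config variable is hypothetical.
 *
 *	foo_regmap_config.max_register = 0xff;	// revision B exposes more
 *	ret = regmap_reinit_cache(map, &foo_regmap_config);
 *	if (ret)
 *		return ret;
 */
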
/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device; if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare;
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

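/*
 * Example (editor's sketch): a child device of an MFD typically borrows the
 * parent's regmap rather than creating its own:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */
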
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* The selector register may live inside the data window; in
	   that case it is present on every page and needs no page
	   switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes,
						     noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

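/*
 * Example (editor's sketch): register names and values are hypothetical.
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, FOO_CTRL_ENABLE);
 *	if (ret)
 *		dev_err(dev, "failed to enable: %d\n", ret);
 */
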
/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

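/*
 * Worked example (editor's note) for the chunking above: with 2-byte values,
 * a 20-byte write and max_raw_write = 16, chunk_regs = 16 / 2 = 8, so
 * chunk_bytes = 16 and chunk_count = 10 / 8 = 1; one 16-byte transfer is
 * followed by a final 4-byte transfer for the remainder.
 */
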
/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_noinc_write(): Write data to a register without incrementing the
 * register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);

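/*
 * Example (editor's sketch): streaming a buffer into a hypothetical FIFO
 * data register, which must be volatile and writeable_noinc:
 *
 *	ret = regmap_noinc_write(map, FOO_REG_FIFO, buf, len);
 *	if (ret)
 *		dev_err(dev, "FIFO write failed: %d\n", ret);
 */
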
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 * register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
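	 *
	 * For example (hypothetical layout): with a window length of 2
	 * and a range starting at 0x10, a sequence writing 0x10, 0x11,
	 * 0x12 is chopped into the chunk {0x10, 0x11}, a page select,
	 * and then the chunk {0x12}.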
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;

			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;

			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);

			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
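 *
 * A usage sketch; the register address, buffer and its lifetime
 * management are hypothetical:
 *
 *	err = regmap_raw_write_async(map, 0x100, buf, len);
 *	if (!err)
 *		err = regmap_async_complete(map);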
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
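 *
 * A minimal sketch; the 0x04 status register is a made-up example:
 *
 *	unsigned int status;
 *
 *	err = regmap_read(map, 0x04, &status);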
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
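 *
 * A sketch of draining such a FIFO; the register address and buffer
 * size are illustrative assumptions:
 *
 *	u8 fifo[64];
 *
 *	err = regmap_noinc_read(map, 0x20, fifo, sizeof(fifo));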
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
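 *
 * For instance, assuming a hypothetical device with 32-bit registers
 * and an invented base address:
 *
 *	u32 vals[8];
 *
 *	err = regmap_bulk_read(map, 0x30, vals, ARRAY_SIZE(vals));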
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async,
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
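 *
 * For example, setting bit 0 and clearing bit 1 of a hypothetical 0x08
 * register, synchronously and without forcing the write:
 *
 *	err = regmap_update_bits_base(map, 0x08, 0x3, 0x1,
 *				      NULL, false, false);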
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
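 *
 * A typical call site might look like this; the register addresses and
 * values are invented for illustration:
 *
 *	static const struct reg_sequence foo_patch[] = {
 *		{ 0x71, 0x0004 },
 *		{ 0x72, 0x0127 },
 *	};
 *
 *	err = regmap_register_patch(map, foo_patch, ARRAY_SIZE(foo_patch));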
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);