// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
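
/*
 * The checks above are typically driven by tables supplied in the
 * regmap_config.  A minimal sketch (the FOO_* register names are
 * hypothetical):
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(FOO_CTRL, FOO_IRQ_MASK),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * With .wr_table = &foo_wr_table in the config, regmap_writeable()
 * returns true only inside the listed ranges.
 */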

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}


static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_7_17_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = val >> 8;
	out[0] = (val >> 16) | (reg << 1);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
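
/*
 * As an illustration of the packed formats above, for a device with
 * 7-bit registers and 9-bit values regmap_format_7_9_write() turns
 * reg = 0x40, val = 0x1ff into the big-endian u16
 * (0x40 << 9) | 0x1ff = 0x81ff, i.e. the bytes { 0x81, 0xff }.
 */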

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
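
/*
 * Which pair of the helpers above gets used is decided in
 * __regmap_init() from the regmap_config.  For example, a driver
 * sharing registers with another processor via a hardware spinlock
 * might request (sketch; the hwlock ID is hypothetical):
 *
 *	struct regmap_config config = {
 *		...
 *		.use_hwlock = true,
 *		.hwlock_id = 0,
 *		.hwlock_mode = HWLOCK_IRQSTATE,
 *	};
 */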

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
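
/*
 * regmap_get_val_endian() below additionally consults the firmware
 * node, so a DT-described device can override the bus default, e.g.
 * (sketch of a hypothetical node):
 *
 *	codec@1a {
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 */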

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL;	/* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);
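
	/*
	 * Select a register formatter.  The switch below keys on the
	 * total register width including any sub-byte padding; the odd
	 * widths (2, 4, 7, 10 and 12 bits) are only supported paired
	 * with specific value widths as single formatted writes.
	 */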
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		case 17:
			map->format.format_write = regmap_format_7_17_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;

}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
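
/*
 * A reg_field is usually described with the REG_FIELD() macro; a
 * minimal sketch (register and bit positions are hypothetical):
 *
 *	struct reg_field en_field = REG_FIELD(FOO_CTRL, 0, 0);
 *	struct regmap_field *en;
 *
 *	en = devm_regmap_field_alloc(dev, map, en_field);
 *	if (IS_ERR(en))
 *		return PTR_ERR(en);
 */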

/**
 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise bulk register
 *                                  fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
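
/*
 * Bulk allocation packs all the fields into a single allocation; a
 * typical caller (sketch, hypothetical register layout):
 *
 *	static struct reg_field foo_fields[] = {
 *		REG_FIELD(FOO_CTRL, 0, 0),
 *		REG_FIELD(FOO_CTRL, 4, 7),
 *	};
 *	struct regmap_field *fields[ARRAY_SIZE(foo_fields)];
 *
 *	ret = devm_regmap_field_bulk_alloc(dev, map, fields, foo_fields,
 *					   ARRAY_SIZE(foo_fields));
 */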

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 *                            regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
 *                                 devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc().
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
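
/*
 * A typical use is resizing the map once the device has been identified
 * at runtime (sketch; FOO_CHIP_ID and foo_max_reg_for() are
 * hypothetical):
 *
 *	regmap_read(map, FOO_CHIP_ID, &id);
 *	config.max_register = foo_max_reg_for(id);
 *	ret = regmap_reinit_cache(map, &config);
 */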

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
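
/*
 * dev_get_regmap() lets the function drivers of an MFD-style device
 * pick up the regmap registered by their parent (sketch):
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */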

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	 * window.  In that case, the selector register appears on every
	 * page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}
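
/*
 * The paging handled by _regmap_select_page() above is described by
 * regmap_range_cfg entries in the config, e.g. a device exposing
 * registers 0x100-0x4ff through a 0x100-register window at 0x10 with
 * a page selector at 0x0f (all values hypothetical):
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.range_min = 0x100,
 *			.range_max = 0x4ff,
 *			.selector_reg = 0x0f,
 *			.selector_mask = 0x3,
 *			.window_start = 0x10,
 *			.window_len = 0x100,
 *		},
 *	};
 */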

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
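
/*
 * Illustration of the buffer laid out above: for a map with 8-bit
 * registers, no padding and 16-bit big-endian values, writing 0xbeef
 * to register 0x2a transmits the bytes { 0x2a, 0xbe, 0xef }, with any
 * write_flag_mask OR'd into the register byte(s).
 */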

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	ret = map->reg_write(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x <= %x\n", reg, val);

		trace_regmap_reg_write(map, reg, val);
	}

	return ret;
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
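
/*
 * Typical single-register usage (sketch; FOO_RESET and its value are
 * hypothetical):
 *
 *	ret = regmap_write(map, FOO_RESET, 0x1);
 *	if (ret)
 *		dev_err(dev, "failed to reset: %d\n", ret);
 */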

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *	 device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
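
/*
 * Sketch of a firmware-download style caller; "fw" is assumed to be a
 * struct firmware obtained elsewhere and FOO_DSP_BASE is hypothetical:
 *
 *	ret = regmap_raw_write(map, FOO_DSP_BASE, fw->data, fw->size);
 *
 * fw->size must be a multiple of map->format.val_bytes or the call
 * fails with -EINVAL.
 */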

/**
 * regmap_noinc_write(): Write data to a register without incrementing the
 *			 register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers.  Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
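
/*
 * Sketch of loading a hardware FIFO through a noinc register; FOO_FIFO
 * is hypothetical and must be both volatile and writeable_noinc:
 *
 *	ret = regmap_noinc_write(map, FOO_FIFO, buf, len);
 *
 * Unlike regmap_raw_write(), every chunk is written to the same
 * register number instead of an incrementing range.
 */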
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple
 * transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we issue a series
	 * of single write operations.
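	 * Each value is picked out of the caller's buffer at the map's
	 * native value size and written with _regmap_write().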
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
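	 * The writes are therefore issued as a series of chunks, with the
	 * page select register updated between chunks as needed.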
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay, make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0. This can't occur
			 * with page breaks as we never write on the first
			 * iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;

			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;

			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);

			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value pairs to be
 *        written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value pairs to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
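 *
 * A minimal sketch of the async pattern (the FOO_DSP_FW_BASE register
 * name and the fw handle are hypothetical):
 *
 *	ret = regmap_raw_write_async(map, FOO_DSP_FW_BASE, fw->data,
 *				     fw->size);
 *	if (ret)
 *		return ret;
 *
 *	return regmap_async_complete(map);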
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
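 *
 * A minimal usage sketch (the FOO_STATUS register name is hypothetical):
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(map, FOO_STATUS, &status);
 *	if (ret)
 *		return ret;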
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *			 register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
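 *
 * A hypothetical FIFO drain sketch (the FOO_FIFO register name and the
 * buffer size are illustrative only):
 *
 *	u8 fifo_data[16];
 *
 *	ret = regmap_noinc_read(map, FOO_FIFO, fifo_data,
 *				sizeof(fifo_data));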
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with
 *			  port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
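 *
 * A minimal usage sketch, assuming a map with 16-bit values (the
 * FOO_SAMPLE_BASE register name and count are hypothetical):
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(map, FOO_SAMPLE_BASE, samples,
 *			       ARRAY_SIZE(samples));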
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * Perform a read/modify/write cycle on a register with the change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
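 *
 * A minimal sketch (the FOO_CTRL register and FOO_CTRL_EN bit are
 * hypothetical); most callers use the regmap_update_bits() wrapper,
 * which fixes the change, async and force arguments:
 *
 *	ret = regmap_update_bits_base(map, FOO_CTRL, FOO_CTRL_EN,
 *				      FOO_CTRL_EN, NULL, false, false);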
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
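 *
 * A hypothetical patch table (the register addresses, values and the
 * 10us delay are illustrative only):
 *
 *	static const struct reg_sequence foo_patch[] = {
 *		{ 0x14, 0x0001 },
 *		{ 0x15, 0x0080, 10 },
 *	};
 *
 *	ret = regmap_register_patch(map, foo_patch, ARRAY_SIZE(foo_patch));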
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);