/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
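/*
 * Example (illustrative sketch, not part of the upstream file): a
 * driver can describe writability either with a callback or with an
 * access table.  The names below are hypothetical.
 *
 *	static const struct regmap_range foo_wr_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_yes_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
 *	};
 *
 * Pointing regmap_config.wr_table at foo_wr_table makes
 * regmap_writeable() accept only registers 0x00..0x3f.
 */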
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
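/*
 * Worked example (illustrative): regmap_format_24() above stores the
 * value big-endian, most significant byte first.  With val = 0x123456
 * and shift = 0 the buffer ends up as:
 *
 *	b[0] = 0x12, b[1] = 0x34, b[2] = 0x56
 *
 * regmap_parse_24() further down performs the inverse transformation.
 */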
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}
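/*
 * Example (illustrative sketch): the hwspinlock-backed locking above is
 * selected entirely through regmap_config.  The id used here is
 * hypothetical.
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.use_hwlock = true,
 *		.hwlock_id = 0,
 *		.hwlock_mode = HWLOCK_IRQSTATE,
 *	};
 *
 * With HWLOCK_IRQSTATE, __regmap_init() below picks the
 * regmap_lock_hwlock_irqsave()/regmap_unlock_hwlock_irqrestore() pair.
 */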
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
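/*
 * Example (illustrative): regmap_get_val_endian() below lets a device
 * tree node override the value endianness.  A hypothetical node:
 *
 *	codec@1a {
 *		compatible = "vendor,foo-codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * would make regmap_get_val_endian() return REGMAP_ENDIAN_LITTLE even
 * if the bus default is big-endian.
 */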
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);
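	/*
	 * Pick register and value marshalling helpers for the bus case.
	 * The oddly-sized pairs below (2/6, 4/12, 7/9, 10/14) match
	 * devices that pack the register address and value into a single
	 * small word, e.g. a 7-bit register with a 9-bit value in one
	 * 16-bit SPI frame.
	 */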
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;

}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
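/*
 * Example (illustrative sketch; the register layout is hypothetical):
 *
 *	static const struct reg_field foo_vsel = REG_FIELD(0x04, 2, 5);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_vsel);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 * REG_FIELD() fills in struct reg_field, and regmap_field_init() above
 * turns msb/lsb into the shift and GENMASK() mask used on access.
 */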
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
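/*
 * Example (illustrative): a driver that discovers the device variant at
 * probe time might switch to a variant-specific config; the names here
 * are hypothetical.
 *
 *	ret = regmap_reinit_cache(map, &foo_rev_b_config);
 *	if (ret)
 *		return ret;
 *
 * The caller must guarantee no concurrent regmap access while the
 * cache is being rebuilt.
 */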
/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
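/*
 * Example (illustrative): an MFD child driver commonly fetches its
 * parent's regmap rather than creating one of its own:
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 *
 * This only works because regmap_attach_dev() registered the map as a
 * devres resource on the owning device.
 */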
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have a selector register inside the data
	 * window.  In that case, the selector register is present on
	 * every page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}
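/*
 * Example (illustrative): many SPI devices signal the transfer
 * direction with the top bit of the register address.  Such a device
 * would set, in its regmap_config:
 *
 *	.read_flag_mask = 0x80,
 *
 * and regmap_set_work_buf_flag_mask() above then ORs the relevant mask
 * into the formatted register bytes before the transfer goes out.
 */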
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
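/*
 * Example (illustrative; FOO_CTRL is a hypothetical register):
 *
 *	ret = regmap_write(map, FOO_CTRL, 0x01);
 *	if (ret)
 *		dev_err(dev, "failed to write FOO_CTRL: %d\n", ret);
 *
 * The value goes through the cache first (unless bypassed), so a later
 * regmap_read() of a non-volatile register can be served without I/O.
 */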
/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && val_len > map->max_raw_write)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
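/*
 * Example (illustrative): pushing a firmware blob that is already laid
 * out in the device's wire format.  FOO_DSP_BASE and fw are
 * hypothetical.
 *
 *	ret = regmap_raw_write(map, FOO_DSP_BASE, fw->data, fw->size);
 *	if (ret)
 *		dev_err(dev, "firmware download failed: %d\n", ret);
 *
 * fw->size must be a multiple of the value size, and at most
 * regmap_get_raw_write_max(map) if the bus sets a limit.
 */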
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force option.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
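/*
 * Example (illustrative): with the hypothetical foo_vsel field from the
 * devm_regmap_field_alloc() example above, writing the field value 0x3
 * reduces to an update_bits on bits 5:2 of register 0x04:
 *
 *	ret = regmap_field_update_bits_base(field, 0xf, 0x3,
 *					    NULL, false, false);
 *
 * The helper shifts mask and value by field->shift and clamps the mask
 * with field->mask before calling regmap_update_bits_base().
 */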
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io. It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
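/*
 * Example (illustrative): writing four 16-bit coefficients held in CPU
 * native endianness; FOO_COEFF_BASE and coeffs are hypothetical.
 *
 *	u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, FOO_COEFF_BASE, coeffs,
 *				ARRAY_SIZE(coeffs));
 *
 * Unlike regmap_raw_write(), the values here are in native register
 * size and are converted in place to the device's byte order before a
 * single raw write where the bus allows it.
 */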
/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}
static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes. This also applies if
	 * there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between page breaks or delays
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
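/*
 * Example (editor's illustrative sketch): a reg_sequence suitable for
 * regmap_multi_reg_write(), documented below. The registers and values
 * are invented; the 50us delay after the second write forces the
 * sequence to be chopped at that point, as described above.
 */
static inline int multi_reg_write_example(struct regmap *map)
{
	static const struct reg_sequence seq[] = {
		{ .reg = 0x00, .def = 0x01 },
		{ .reg = 0x01, .def = 0x80, .delay_us = 50 },
		{ .reg = 0x02, .def = 0x3f },
	};

	return regmap_multi_reg_write(map, seq, ARRAY_SIZE(seq));
}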
/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
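/*
 * Example (editor's illustrative sketch): pushing a sequence straight to
 * the hardware with regmap_multi_reg_write_bypassed(), leaving the cache
 * untouched. The unlock sequence and its values are invented.
 */
static inline int multi_reg_write_bypassed_example(struct regmap *map)
{
	static const struct reg_sequence unlock_seq[] = {
		{ .reg = 0xfe, .def = 0xa5 },
		{ .reg = 0xff, .def = 0x5a },
	};

	return regmap_multi_reg_write_bypassed(map, unlock_seq,
					       ARRAY_SIZE(unlock_seq));
}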
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
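/*
 * Example (editor's illustrative sketch): pairing regmap_raw_write_async()
 * with regmap_async_complete() for a firmware-style download. The base
 * register 0x100 is invented; fw_data must stay valid until completion.
 */
static inline int raw_write_async_example(struct regmap *map,
					  const void *fw_data, size_t fw_len)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x100, fw_data, fw_len);
	if (ret)
		return ret;

	/* Block until the scheduled write has hit the hardware */
	return regmap_async_complete(map);
}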
/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
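/*
 * Example (editor's illustrative sketch): reading a two-bit field with
 * regmap_field_read(). The register layout is invented; REG_FIELD()
 * describes bits [5:4] of register 0x03 and the field is allocated with
 * devm_regmap_field_alloc().
 */
static inline int field_read_example(struct device *dev, struct regmap *map,
				     unsigned int *status)
{
	const struct reg_field status_field = REG_FIELD(0x03, 4, 5);
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, status_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* *status holds the field value, already shifted down */
	return regmap_field_read(field, status);
}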
/**
 * regmap_fields_read() - Read a value from a single register field with
 *                        port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
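/*
 * Example (editor's illustrative sketch): reading the same field from one
 * of several identical ports with regmap_fields_read(). The layout is
 * invented: four ports spaced 0x10 registers apart, with the field in
 * bits [1:0] of register 0x04.
 */
static inline int fields_read_example(struct device *dev, struct regmap *map,
				      unsigned int port, unsigned int *val)
{
	const struct reg_field port_field = {
		.reg = 0x04,
		.lsb = 0,
		.msb = 1,
		.id_size = 4,		/* four ports */
		.id_offset = 0x10,	/* register stride between ports */
	};
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, port_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	return regmap_fields_read(field, port, val);
}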
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices do not support bulk read; for them we
		 * issue a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads, for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + regmap_get_offset(map, i),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes),
						       ival, 0);
			} else {
				/*
				 * Devices providing read and write operations
				 * can use the bulk I/O functions if they
				 * define val_bytes; we assume that the values
				 * are native endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be done asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}
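/*
 * Example (editor's illustrative sketch): a synchronous read/modify/write
 * with regmap_update_bits_base(), setting bit 3 of an invented control
 * register. 'changed' reports whether a write actually happened.
 */
static inline int update_bits_example(struct regmap *map, bool *changed)
{
	/* async == false, force == false: plain read/modify/write */
	return regmap_update_bits_base(map, 0x07, BIT(3), BIT(3),
				       changed, false, false);
}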
/**
 * regmap_async_complete() - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch() - Register and apply register updates to be applied
 *                           on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
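/*
 * Example (editor's illustrative sketch): registering an errata patch with
 * regmap_register_patch(), typically from a driver's probe(). The register
 * addresses and values are invented.
 */
static inline int register_patch_example(struct regmap *map)
{
	static const struct reg_sequence errata_patch[] = {
		{ .reg = 0x71, .def = 0x02 },	/* undocumented vendor fixup */
		{ .reg = 0x72, .def = 0xc0 },
	};

	/* Applied now and again on every regcache_sync() */
	return regmap_register_patch(map, errata_patch,
				     ARRAY_SIZE(errata_patch));
}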
/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
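/*
 * Example (editor's illustrative sketch): decoding one word out of a raw
 * buffer, read with regmap_raw_read(), using regmap_parse_val() so the
 * device's value format is honoured. The register address is invented.
 */
static inline int parse_val_example(struct regmap *map, unsigned int *val)
{
	u8 raw[4];	/* big enough for values up to 4 bytes */
	int bytes = regmap_get_val_bytes(map);
	int ret;

	if (bytes < 0 || bytes > sizeof(raw))
		return -EINVAL;

	ret = regmap_raw_read(map, 0x30, raw, bytes);
	if (ret)
		return ret;

	return regmap_parse_val(map, raw, val);
}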