/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
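/*
 * Example: a driver can describe which registers are writeable
 * declaratively with a regmap_access_table hooked up via
 * config->wr_table.  "no" ranges are checked before "yes" ranges, so
 * they can punch holes in an otherwise writeable region.  The device
 * and register addresses below are hypothetical, for illustration
 * only.
 */
static const struct regmap_range foo_wr_yes_ranges[] = {
	regmap_reg_range(0x00, 0x3f),	/* control block */
	regmap_reg_range(0x80, 0x9f),	/* DAC block */
};

static const struct regmap_range foo_wr_no_ranges[] = {
	regmap_reg_range(0x10, 0x13),	/* read-only status window */
};

static const struct regmap_access_table foo_wr_table __maybe_unused = {
	.yes_ranges = foo_wr_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
	.no_ranges = foo_wr_no_ranges,
	.n_no_ranges = ARRAY_SIZE(foo_wr_no_ranges),
};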
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
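/*
 * Worked example for the formatters above: with reg_bits = 7 and
 * val_bits = 9, regmap_format_7_9_write() packs register 0x1a and
 * value 0x155 as cpu_to_be16((0x1a << 9) | 0x155) == cpu_to_be16(0x3555),
 * so the bytes 0x35 0x55 go on the wire, most significant byte first.
 */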
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];

	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}
static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{
}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;

	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;

	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;

	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
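/*
 * Example: the lookup order above is config, then device tree, then
 * bus default, then big-endian.  A driver that needs little-endian
 * values regardless of the bus default can simply say so in its
 * config; the device below is hypothetical.
 */
static const struct regmap_config foo_le_config __maybe_unused = {
	.reg_bits = 8,
	.val_bits = 16,
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};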
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
					    config->val_bits +
					    config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}
	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/*
		 * Make sure that this register range has no selector
		 * or data window within its boundary
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
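/*
 * Typical usage sketch: drivers normally reach __regmap_init() through a
 * bus-specific wrapper such as devm_regmap_init_i2c() or
 * devm_regmap_init_spi() rather than calling it directly.  The config
 * below describes a hypothetical device with 8-bit registers, 8-bit
 * values and a register cache; in the driver's probe() one would do
 * something like:
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */
static const struct regmap_config foo_regmap_config __maybe_unused = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,
	.cache_type = REGCACHE_RBTREE,
};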
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
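/*
 * Example: a 3-bit field in bits [6:4] of a hypothetical register 0x24
 * can be described with REG_FIELD() and then accessed through the
 * regmap_field API, which is built on the update_bits helpers later in
 * this file:
 *
 *	static const struct reg_field foo_gain = REG_FIELD(0x24, 4, 6);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_gain);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 *	ret = regmap_field_write(field, 0x5);
 *	ret = regmap_field_read(field, &val);
 *
 * The write is masked to bits [6:4]; the read is shifted back down so
 * that val holds just the field value.
 */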
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
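/*
 * Example: a child device such as an MFD cell can look up its parent's
 * regmap through devres rather than holding a direct pointer.  A
 * hypothetical sketch:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */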
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have selector register inside data window.
	 * In that case, selector register is located on every page and
	 * it needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
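/*
 * Example: the paging that _regmap_select_page() implements is
 * described by regmap_range_cfg entries hooked up via config->ranges
 * and config->num_ranges.  In this hypothetical sketch the device
 * exposes 64 paged registers at virtual addresses 0x100-0x13f through
 * a 16-register physical window at 0x20, selecting the active page
 * with bits [1:0] of register 0x1f.
 */
static const struct regmap_range_cfg foo_range_cfg[] __maybe_unused = {
	{
		.name = "pages",
		.range_min = 0x100,
		.range_max = 0x13f,
		.selector_reg = 0x1f,
		.selector_mask = 0x3,
		.selector_shift = 0,
		.window_start = 0x20,
		.window_len = 16,
	},
};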
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;

		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/*
	 * If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/*
		 * regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
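/*
 * Usage sketch for the write entry points above, for a hypothetical
 * device.  regmap_write() takes a single value, formats it and goes
 * through the cache; regmap_raw_write() sends a pre-formatted block,
 * so fw_buf must already be in device byte order and fw_len a multiple
 * of map->format.val_bytes:
 *
 *	ret = regmap_write(map, 0x10, 0x8000);
 *
 *	ret = regmap_raw_write(map, 0x20, fw_buf, fw_len);
 */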
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write even if the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write even if the
 *         value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
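/*
 * Usage sketch: unlike regmap_raw_write(), regmap_bulk_write() takes
 * values in native CPU order and converts them in place (via
 * format.parse_inplace) before issuing the raw block write.  A
 * hypothetical example for a device with 16-bit values:
 *
 *	u16 coeffs[8];
 *
 *	ret = regmap_bulk_write(map, FOO_COEF_BASE, coeffs,
 *				ARRAY_SIZE(coeffs));
 *
 * FOO_COEF_BASE is an illustrative register address.
 */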
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, new value) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes. This also applies if
	 * there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
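
/*
 * Worked illustration (not part of the original file): given a
 * hypothetical sequence {A, B, C, D} with no delays, where A and B sit
 * in page 0 and C and D sit in page 1, the loop above batches:
 *
 *   raw_multi_reg_write(A, B);
 *   select page 1;
 *   raw_multi_reg_write(C, D);
 *
 * preserving the original write order while chopping at the page break.
 */
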
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the target
 * bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
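
/*
 * Illustrative usage sketch (not part of the original file): a one-shot
 * init sequence with an inter-write delay. The registers, values and
 * delay are hypothetical.
 */
static int __maybe_unused example_chip_init(struct regmap *map)
{
	static const struct reg_sequence init_seq[] = {
		{ 0x00, 0x01, .delay_us = 100 },	/* reset, then settle */
		{ 0x01, 0x40 },
		{ 0x02, 0x0f },
	};

	return regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
}
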
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
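
/*
 * Illustrative usage sketch (not part of the original file): streaming
 * a firmware image and then waiting for completion. The download
 * register and buffer handling are hypothetical; the buffer must stay
 * valid until regmap_async_complete() returns.
 */
static int __maybe_unused example_download_fw(struct regmap *map,
					      const void *fw_data,
					      size_t fw_len)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x80, fw_data, fw_len);
	if (ret != 0)
		return ret;

	/* Block until all scheduled writes have hit the bus */
	return regmap_async_complete(map);
}
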
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to buffer where the read data will be stored
 * @val_len: Size of data to read in bytes
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
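
/*
 * Illustrative usage sketch (not part of the original file): polling a
 * hypothetical status register until a ready bit is set. The register
 * address, bit position and retry policy are made up for the example.
 */
static int __maybe_unused example_wait_ready(struct regmap *map)
{
	unsigned int status;
	int tries, ret;

	for (tries = 0; tries < 10; tries++) {
		ret = regmap_read(map, 0x04, &status);
		if (ret != 0)
			return ret;
		if (status & BIT(0))
			return 0;
		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}
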
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *                       register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
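
/*
 * Illustrative usage sketch (not part of the original file): draining a
 * hypothetical 16-byte FIFO exposed through a single 8-bit data
 * register. The FIFO register address is made up; it must be marked
 * volatile and noinc-readable in the driver's regmap configuration.
 */
static int __maybe_unused example_drain_fifo(struct regmap *map, u8 *buf)
{
	/* Reads 16 bytes from register 0x20 without address increment */
	return regmap_noinc_read(map, 0x20, buf, 16);
}
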
/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with
 *                        port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
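
/*
 * Illustrative usage sketch (not part of the original file): reading
 * four consecutive measurement registers into CPU-native u16 values,
 * assuming a hypothetical device with 16-bit registers. The base
 * register address is made up.
 */
static int __maybe_unused example_read_samples(struct regmap *map,
					       u16 *samples)
{
	/* Fills samples[0..3] from registers 0x30..0x33 */
	return regmap_bulk_read(map, 0x30, samples, 4);
}
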
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether the write should be forced even
 *         if the value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with the change,
 * async and force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
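
/*
 * Illustrative usage sketch (not part of the original file): enabling
 * one bit in a hypothetical control register while leaving its
 * neighbours untouched, and reporting whether the value changed.
 */
static int __maybe_unused example_enable_block(struct regmap *map,
					       bool *changed)
{
	/* Synchronous, non-forced read/modify/write of bit 3 */
	return regmap_update_bits_base(map, 0x00, BIT(3), BIT(3),
				       changed, false, false);
}
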
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete() - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch() - Register and apply register updates to be applied
 *                           on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by generic
 * infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
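
/*
 * Illustrative usage sketch (not part of the original file): registering
 * a vendor-recommended fixup sequence so it is applied immediately and
 * re-applied on every cache sync. The registers and values are
 * hypothetical.
 */
static int __maybe_unused example_apply_errata(struct regmap *map)
{
	static const struct reg_sequence errata[] = {
		{ 0x7f, 0x02 },		/* undocumented vendor fixup */
		{ 0x55, 0xa0 },
	};

	return regmap_register_patch(map, errata, ARRAY_SIZE(errata));
}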