/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
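/*
 * Illustrative sketch (not part of this file): instead of a
 * writeable_reg() callback, a driver can feed regmap_writeable() and
 * regmap_check_range_table() through the wr_table field of its
 * regmap_config.  All names below are hypothetical.
 */
#if 0
static const struct regmap_range foo_wr_ranges[] = {
	regmap_reg_range(0x00, 0x1f),
	regmap_reg_range(0x40, 0x4f),
};

static const struct regmap_access_table foo_wr_table = {
	.yes_ranges = foo_wr_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
};

static const struct regmap_config foo_config = {
	.wr_table = &foo_wr_table,
};
#endif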
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
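/*
 * Worked example (illustrative, not from this file): with the 7/9
 * pairing above, regmap_format_7_9_write() packs reg = 0x1a and
 * val = 0x155 as cpu_to_be16((0x1a << 9) | 0x155), i.e. 0x3555,
 * which goes on the wire as the bytes 0x35 0x55.  This layout is
 * typical of 16-bit SPI register words with a 7-bit address.
 */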
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
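/*
 * Illustrative sketch (not part of this file): a driver can override the
 * built-in mutex/spinlock by supplying its own lock callbacks through
 * regmap_config, e.g. to share one lock between two maps.  All names
 * below are hypothetical.
 */
#if 0
static DEFINE_MUTEX(foo_shared_lock);

static void foo_regmap_lock(void *arg)
{
	mutex_lock(arg);
}

static void foo_regmap_unlock(void *arg)
{
	mutex_unlock(arg);
}

static const struct regmap_config foo_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock = foo_regmap_lock,
	.unlock = foo_regmap_unlock,
	.lock_arg = &foo_shared_lock,
};
#endif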
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
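/*
 * Illustrative sketch (not part of this file): the precedence above is
 * regmap_config, then devicetree ("big-endian"/"little-endian"/
 * "native-endian"), then the bus default, then big-endian.  A
 * hypothetical config pinning the value endianness looks like:
 */
#if 0
static const struct regmap_config foo_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
#endif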
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;
	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif
	}
	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}
	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
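/*
 * Illustrative sketch (not part of this file): __devm_regmap_init() is
 * normally reached through bus-specific wrappers such as
 * devm_regmap_init_i2c().  A hypothetical probe() might do:
 */
#if 0
static int foo_probe(struct i2c_client *i2c)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &foo_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* map is freed automatically when the device unbinds */
	return 0;
}
#endif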
static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free(): Free a register field allocated using
 * devm_regmap_field_alloc.  Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per the device-driver
 * life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it is finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
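/*
 * Illustrative sketch (not part of this file): a field covering bits
 * 5:4 of register 0x03, allocated against an existing regmap.  The
 * names are hypothetical.
 */
#if 0
static const struct reg_field foo_mode_field = REG_FIELD(0x03, 4, 5);

/* in probe(): */
foo->mode = devm_regmap_field_alloc(dev, foo->regmap, foo_mode_field);
if (IS_ERR(foo->mode))
	return PTR_ERR(foo->mode);
#endif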
/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have a selector register inside the data
	   window.  In that case, the selector register is present on
	   every page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}
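/*
 * Worked example (illustrative): given a virtual range with
 * range_min = 0x100, window_start = 0x10 and window_len = 0x10,
 * an access to reg 0x125 lands in page (0x125 - 0x100) / 0x10 = 2
 * at offset 5, so _regmap_select_page() writes 2 to the selector
 * register and rewrites the target to 0x10 + 5 = 0x15.
 */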
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					       reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
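/*
 * Illustrative sketch (not part of this file): typical single-register
 * I/O from a driver.  FOO_REG_POWER and foo->map are hypothetical.
 */
#if 0
ret = regmap_write(foo->map, FOO_REG_POWER, 0x1);
if (ret)
	return ret;

ret = regmap_read(foo->map, FOO_REG_POWER, &val);
#endif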
/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && map->max_raw_write < val_len)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_field_update_bits_base():
 *	Perform a read/modify/write cycle on the register field
 *	with change, async, force option
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write even if the
 *	   value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
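/*
 * Illustrative sketch (not part of this file): the regmap_field_write()
 * and regmap_field_update_bits() helpers in <linux/regmap.h> are thin
 * wrappers around this function; callers pass field-relative values.
 */
#if 0
/* Sets bits 5:4 of reg 0x03 to 2 if foo->mode was built with REG_FIELD(0x03, 4, 5) */
ret = regmap_field_write(foo->mode, 2);
#endif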
/**
 * regmap_fields_update_bits_base():
 *	Perform a read/modify/write cycle on the register field
 *	with change, async, force option
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write even if the
 *	   value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/*
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to write to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple
 * transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we have a
	 * series of single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory-mapped I/O.  It does not
	 * allow val_bytes of 3, for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these
	 * limitations and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
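/*
 * Illustrative sketch (not part of this file): writing four 16-bit
 * coefficient registers in one call.  Values are passed in native
 * register size; FOO_REG_COEF0 and the data are hypothetical.
 */
#if 0
static const u16 coefs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };

ret = regmap_bulk_write(foo->map, FOO_REG_COEF0, coefs, ARRAY_SIZE(coefs));
#endif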
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes.  This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
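/*
 * Illustrative sketch (not part of this file): a power-up sequence with
 * a settle delay, written through the multi-reg path.  Register names
 * and values are hypothetical.
 */
#if 0
static const struct reg_sequence foo_init[] = {
	{ FOO_REG_RESET, 0x01, 10 },	/* wait 10us after this write */
	{ FOO_REG_CLOCK, 0x80 },
	{ FOO_REG_POWER, 0x03 },
};

ret = regmap_multi_reg_write(foo->map, foo_init, ARRAY_SIZE(foo_init));
#endif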
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
 *                                    device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * The set of register,value pairs may be supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 *                           asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
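 *
 * Illustrative usage (a minimal sketch; 'fw' and FW_BASE_REG are
 * assumptions, with the buffer kept valid until completion):
 *
 *	ret = regmap_raw_write_async(map, FW_BASE_REG, fw->data, fw->size);
 *	if (ret == 0)
 *		ret = regmap_async_complete(map);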
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(map->work_buf);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
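 *
 * Illustrative usage (a minimal sketch; CHIP_ID_REG and 'dev' are
 * assumed names from a hypothetical driver):
 *
 *	unsigned int id;
 *
 *	ret = regmap_read(map, CHIP_ID_REG, &id);
 *	if (ret == 0)
 *		dev_info(dev, "chip ID %#x\n", id);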
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read(): Read a value from a single register field with
 *                       port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
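 *
 * The register actually read is field->reg + (field->id_offset * id),
 * and the value returned is (read value & field->mask) >> field->shift,
 * as implemented below.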
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices do not support bulk read; for
		 * them we have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads; for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + regmap_get_offset(map, i),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes), ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define val_bytes;
				 * we assume that the values are native
				 * endian.
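				 *
				 * For example, with val_bytes == 2 a
				 * register value of 0x1234 lands in the
				 * caller's buffer as a host-endian u16,
				 * via the switch below.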
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base(): Perform a read/modify/write cycle on the
 *                            register map with change, async and force
 *                            options
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to schedule the write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * If async is true the write will be scheduled asynchronously where the
 * bus supports it. With most buses the read must be done synchronously,
 * so this is most useful for devices with a cache which do not need to
 * interact with the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete(): Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.
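 *
 * Illustrative usage (a minimal sketch, assuming a batch of
 * regmap_raw_write_async() calls has already been issued on 'map'):
 *
 *	ret = regmap_async_complete(map);
 *	if (ret != 0)
 *		dev_err(dev, "async write failed: %d\n", ret);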
 * Returns an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch(): Register and apply register updates to be
 *                          applied on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register(): Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride(): Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
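 *
 * Illustrative usage (a minimal sketch; 'max' would typically come
 * from regmap_get_max_register()):
 *
 *	stride = regmap_get_reg_stride(map);
 *	for (reg = 0; reg <= max; reg += stride)
 *		regmap_read(map, reg, &val);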
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);