/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}
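/*
 * Illustrative sketch (names hypothetical, not part of this file): a
 * driver would normally express register accessibility either via the
 * *_reg() callbacks or via access tables like the ones below, both of
 * which are checked by the helpers above.
 *
 *	static const struct regmap_range foo_wr_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_yes_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
 *	};
 *
 * A config with .wr_table = &foo_wr_table then makes regmap_writeable()
 * return true only for registers 0x00..0x3f.
 */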
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}
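/*
 * Worked example (illustrative): with the 7_9 layout above, writing
 * val = 0x55 to reg = 0x1a packs both into one big-endian 16-bit word:
 *
 *	(0x1a << 9) | 0x55 == 0x3400 | 0x55 == 0x3455
 *
 * so the bytes 0x34 0x55 go on the wire, register bits in the top
 * seven bits and value bits in the bottom nine.
 */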
static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
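/*
 * Illustrative sketch: the mutex/spinlock callbacks above are the
 * defaults, but a driver sharing registers with another processor can
 * supply its own callbacks through the config.  The helpers below are
 * hypothetical:
 *
 *	static void foo_regmap_lock(void *arg)
 *	{
 *		foo_hw_mutex_take(arg);
 *	}
 *
 *	static void foo_regmap_unlock(void *arg)
 *	{
 *		foo_hw_mutex_release(arg);
 *	}
 *
 * They are wired up with .lock = foo_regmap_lock,
 * .unlock = foo_regmap_unlock and .lock_arg in the regmap_config;
 * both callbacks must be set for them to be used.
 */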
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
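/*
 * Illustrative DT snippet (node and compatible are hypothetical): a
 * board can override the value endianness probed above simply by
 * tagging the device node:
 *
 *	syscon@1000 {
 *		compatible = "foo,bar-syscon", "syscon";
 *		reg = <0x1000 0x100>;
 *		little-endian;
 *	};
 *
 * With no "big-endian", "little-endian" or "native-endian" property
 * the bus default applies, and failing that REGMAP_ENDIAN_BIG.
 */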
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;
	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
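/*
 * Illustrative sketch (device names hypothetical): drivers rarely call
 * __regmap_init() directly; a bus-specific wrapper such as
 * devm_regmap_init_i2c() supplies the regmap_bus and ends up here.  A
 * minimal configuration looks like:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */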
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free(): Free a register field allocated using
 * devm_regmap_field_alloc. Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per the
 * device-driver life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
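/*
 * Illustrative sketch (register layout hypothetical): REG_FIELD() from
 * <linux/regmap.h> packs the register/lsb/msb triple that
 * regmap_field_init() above decomposes into shift and mask.  For a
 * 2-bit gain field in bits [5:4] of register 0x12:
 *
 *	static const struct reg_field foo_gain_field = REG_FIELD(0x12, 4, 5);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_gain_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 * regmap_field_write(field, 0x3) then only touches those two bits.
 */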
/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it is finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 *
 * @map: Register map to free.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}
/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have a selector register inside the data
	   window.  In that case, the selector register is located on
	   every page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
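/*
 * Illustrative sketch (numbers hypothetical): the paging logic above is
 * driven by regmap_range_cfg entries in the config.  A device exposing
 * four pages of 0x80 registers through a window at 0x80, selected by
 * bits [1:0] of register 0x7f, could describe that as:
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.name = "pages",
 *			.range_min = 0x100,
 *			.range_max = 0x2ff,
 *			.selector_reg = 0x7f,
 *			.selector_mask = 0x3,
 *			.selector_shift = 0,
 *			.window_start = 0x80,
 *			.window_len = 0x80,
 *		},
 *	};
 *
 * with .ranges = foo_ranges and .num_ranges = ARRAY_SIZE(foo_ranges);
 * accesses to the virtual range 0x100..0x2ff then page the window
 * automatically.
 */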
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	u8 *u8 = map->work_buf;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	u8[0] |= map->write_flag_mask;

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
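/*
 * Illustrative sketch (register and bit names hypothetical): typical
 * driver-side use of the single-register entry point above, together
 * with the regmap_update_bits() read/modify/write helper:
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, 0x01);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_update_bits(map, FOO_REG_CTRL,
 *				 FOO_CTRL_EN_MASK, FOO_CTRL_EN);
 *
 * Both take the map's lock internally, so no extra locking is needed
 * for individual accesses.
 */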
/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && map->max_raw_write < val_len)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_field_update_bits_base():
 *	Perform a read/modify/write cycle on the register field
 *	with change, async, force option
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the write should be asynchronous
 * @force: Boolean indicating if the write should be forced
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base():
 *	Perform a read/modify/write cycle on the register field
 *	with change, async, force option
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the write should be asynchronous
 * @force: Boolean indicating if the write should be forced
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
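/*
 * Illustrative sketch (values hypothetical): raw writes take data
 * already laid out in the device's wire format, so for a map with
 * 16-bit big-endian values a two-register patch starting at 0x10
 * would be:
 *
 *	const u8 patch[] = { 0x12, 0x34, 0x56, 0x78 };
 *
 *	ret = regmap_raw_write(map, 0x10, patch, sizeof(patch));
 *
 * writing 0x1234 to register 0x10 and 0x5678 to the following
 * register, with no byte swapping done on the caller's behalf.
 */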
/**
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io.  It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
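/*
 * Illustrative sketch (values hypothetical): unlike regmap_raw_write(),
 * bulk writes take values in native CPU byte order and are converted to
 * the device format internally:
 *
 *	u16 coeffs[3] = { 0x0123, 0x4567, 0x89ab };
 *
 *	ret = regmap_bulk_write(map, FOO_REG_COEFF_BASE,
 *				coeffs, ARRAY_SIZE(coeffs));
 *
 * writing three consecutive 16-bit registers in one operation where
 * the bus allows it.
 */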
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of write must be preserved this algorithm chops
	 * the set each time the page changes.  This also applies if
	 * there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write(): Write multiple registers to the device
 *
 * where the set of register,value pairs are supplied in any order,
 * possibly not all in a single range.
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers
 * are addressed.  However, this alternative block multi write mode will
 * send the data as R1,V1,R2,V2,..,Rn,Vn on the target bus.  The target
 * device must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
/*
 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
 * device but not the cache
 *
 * where the set of registers is supplied in any order
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
                                    const struct reg_sequence *regs,
                                    int num_regs)
{
        int ret;
        bool bypass;

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;
        map->cache_bypass = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 * asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to
 *       the device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len)
{
        int ret;

        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_raw_write(map, reg, val, val_len);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
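
/*
 * A minimal usage sketch (hypothetical register base and firmware
 * variable): the data buffer must stay valid until the async writes
 * have been collected.
 *
 *      ret = regmap_raw_write_async(map, FOO_FW_BASE, fw->data, fw->size);
 *      if (ret == 0)
 *              ret = regmap_async_complete(map);
 */
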
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len)
{
        struct regmap_range_node *range;
        u8 *u8 = map->work_buf;
        int ret;

        WARN_ON(!map->bus);

        if (!map->bus || !map->bus->read)
                return -EINVAL;

        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range,
                                          val_len / map->format.val_bytes);
                if (ret != 0)
                        return ret;
        }

        map->format.format_reg(map->work_buf, reg, map->reg_shift);

        /*
         * Some buses or devices flag reads by setting the high bits in the
         * register address; since it's always the high bits for all
         * current formats we can do this here rather than in
         * formatting. This may break if we get interesting formats.
         */
        u8[0] |= map->read_flag_mask;

        trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

        ret = map->bus->read(map->bus_context, map->work_buf,
                             map->format.reg_bytes + map->format.pad_bytes,
                             val, val_len);

        trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

        return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val)
{
        struct regmap *map = context;

        return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val)
{
        int ret;
        struct regmap *map = context;

        if (!map->format.parse_val)
                return -EINVAL;

        ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
        if (ret == 0)
                *val = map->format.parse_val(map->work_buf);

        return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
                        unsigned int *val)
{
        int ret;
        void *context = _regmap_map_get_context(map);

        if (!map->cache_bypass) {
                ret = regcache_read(map, reg, val);
                if (ret == 0)
                        return 0;
        }

        if (map->cache_only)
                return -EBUSY;

        if (!regmap_readable(map, reg))
                return -EIO;

        ret = map->reg_read(context, reg, val);
        if (ret == 0) {
#ifdef LOG_DEVICE
                if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                        dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

                trace_regmap_reg_read(map, reg, *val);

                if (!map->cache_bypass)
                        regcache_write(map, reg, *val);
        }

        return ret;
}

/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
        int ret;

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_read(map, reg, val);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
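
/*
 * A minimal usage sketch (hypothetical register name):
 *
 *      unsigned int val;
 *
 *      ret = regmap_read(map, FOO_STATUS_REG, &val);
 *      if (ret == 0)
 *              dev_dbg(dev, "status: %#x\n", val);
 */
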
/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                    size_t val_len)
{
        size_t val_bytes = map->format.val_bytes;
        size_t val_count = val_len / val_bytes;
        unsigned int v;
        int ret, i;

        if (!map->bus)
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
        if (val_count == 0)
                return -EINVAL;

        map->lock(map->lock_arg);

        if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
            map->cache_type == REGCACHE_NONE) {
                if (!map->bus->read) {
                        ret = -ENOTSUPP;
                        goto out;
                }
                if (map->max_raw_read && map->max_raw_read < val_len) {
                        ret = -E2BIG;
                        goto out;
                }

                /* Physical block read if there's no cache involved */
                ret = _regmap_raw_read(map, reg, val, val_len);

        } else {
                /* Otherwise go word by word for the cache; should be low
                 * cost as we expect to hit the cache.
                 */
                for (i = 0; i < val_count; i++) {
                        ret = _regmap_read(map, reg + regmap_get_offset(map, i),
                                           &v);
                        if (ret != 0)
                                goto out;

                        map->format.format_val(val + (i * val_bytes), v, 0);
                }
        }

 out:
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
        int ret;
        unsigned int reg_val;
        ret = regmap_read(field->regmap, field->reg, &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
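
/*
 * A minimal usage sketch (hypothetical field layout): a regmap_field
 * created over bits [3:1] of a register reads back already masked and
 * shifted, as implemented above.
 *
 *      static const struct reg_field foo_mode_field = REG_FIELD(0x04, 1, 3);
 *      struct regmap_field *f;
 *      unsigned int mode;
 *
 *      f = devm_regmap_field_alloc(dev, map, foo_mode_field);
 *      if (!IS_ERR(f))
 *              ret = regmap_field_read(f, &mode);
 */
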
/**
 * regmap_fields_read(): Read a value from a single register field with
 * port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
                       unsigned int *val)
{
        int ret;
        unsigned int reg_val;

        if (id >= field->id_size)
                return -EINVAL;

        ret = regmap_read(field->regmap,
                          field->reg + (field->id_offset * id),
                          &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
                     size_t val_count)
{
        int ret, i;
        size_t val_bytes = map->format.val_bytes;
        bool vol = regmap_volatile_range(map, reg, val_count);

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        if (map->bus && map->format.parse_inplace &&
            (vol || map->cache_type == REGCACHE_NONE)) {
                /*
                 * Some devices do not support bulk read; for them we
                 * fall back to a series of single read operations.
                 */
                size_t total_size = val_bytes * val_count;

                if (!map->use_single_read &&
                    (!map->max_raw_read || map->max_raw_read > total_size)) {
                        ret = regmap_raw_read(map, reg, val,
                                              val_bytes * val_count);
                        if (ret != 0)
                                return ret;
                } else {
                        /*
                         * Some devices do not support bulk read or do not
                         * support large bulk reads; for them we issue a
                         * series of smaller read operations.
                         */
                        int chunk_stride = map->reg_stride;
                        size_t chunk_size = val_bytes;
                        size_t chunk_count = val_count;

                        if (!map->use_single_read) {
                                chunk_size = map->max_raw_read;
                                if (chunk_size % val_bytes)
                                        chunk_size -= chunk_size % val_bytes;
                                chunk_count = total_size / chunk_size;
                                chunk_stride *= chunk_size / val_bytes;
                        }

                        /* Read bytes that fit into a multiple of chunk_size */
                        for (i = 0; i < chunk_count; i++) {
                                ret = regmap_raw_read(map,
                                                      reg + (i * chunk_stride),
                                                      val + (i * chunk_size),
                                                      chunk_size);
                                if (ret != 0)
                                        return ret;
                        }

                        /* Read remaining bytes */
                        if (chunk_size * i < total_size) {
                                ret = regmap_raw_read(map,
                                                      reg + (i * chunk_stride),
                                                      val + (i * chunk_size),
                                                      total_size - i * chunk_size);
                                if (ret != 0)
                                        return ret;
                        }
                }

                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(val + i);
        } else {
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;
                        ret = regmap_read(map, reg + regmap_get_offset(map, i),
                                          &ival);
                        if (ret != 0)
                                return ret;

                        if (map->format.format_val) {
                                map->format.format_val(val + (i * val_bytes), ival, 0);
                        } else {
                                /* Devices providing read and write
                                 * operations can use the bulk I/O
                                 * functions if they define val_bytes;
                                 * we assume that the values are
                                 * native endian.
                                 */
#ifdef CONFIG_64BIT
                                u64 *u64 = val;
#endif
                                u32 *u32 = val;
                                u16 *u16 = val;
                                u8 *u8 = val;

                                switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
                                case 8:
                                        u64[i] = ival;
                                        break;
#endif
                                case 4:
                                        u32[i] = ival;
                                        break;
                                case 2:
                                        u16[i] = ival;
                                        break;
                                case 1:
                                        u8[i] = ival;
                                        break;
                                default:
                                        return -EINVAL;
                                }
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
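
/*
 * A minimal usage sketch (hypothetical device with 16-bit registers):
 * the destination buffer is in native register size, here u16, and
 * val_count is a number of registers, not bytes.
 *
 *      u16 vals[4];
 *
 *      ret = regmap_bulk_read(map, FOO_DATA_BASE, vals, ARRAY_SIZE(vals));
 */
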
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change, bool force_write)
{
        int ret;
        unsigned int tmp, orig;

        if (change)
                *change = false;

        if (regmap_volatile(map, reg) && map->reg_update_bits) {
                ret = map->reg_update_bits(map->bus_context, reg, mask, val);
                if (ret == 0 && change)
                        *change = true;
        } else {
                ret = _regmap_read(map, reg, &orig);
                if (ret != 0)
                        return ret;

                tmp = orig & ~mask;
                tmp |= val & mask;

                if (force_write || (tmp != orig)) {
                        ret = _regmap_write(map, reg, tmp);
                        if (ret == 0 && change)
                                *change = true;
                }
        }

        return ret;
}

/**
 * regmap_update_bits_base(): Perform a read/modify/write cycle on a
 * register map with change, async and force options
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * If async is true, with most buses the read must still be done
 * synchronously, so this is most useful for devices with a cache
 * which do not need to interact with the hardware to determine the
 * current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
                            unsigned int mask, unsigned int val,
                            bool *change, bool async, bool force)
{
        int ret;

        map->lock(map->lock_arg);

        map->async = async;

        ret = _regmap_update_bits(map, reg, mask, val, change, force);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
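
/*
 * A minimal usage sketch (hypothetical register and mask names):
 * regmap_update_bits() and related helpers are thin wrappers around
 * this function; only the masked bits are rewritten, and only if they
 * changed.
 *
 *      ret = regmap_update_bits_base(map, FOO_CTRL_REG,
 *                                    FOO_ENABLE_MASK, FOO_ENABLE_MASK,
 *                                    NULL, false, false);
 */
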
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
        struct regmap *map = async->map;
        bool wake;

        trace_regmap_async_io_complete(map);

        spin_lock(&map->async_lock);
        list_move(&async->list, &map->async_free);
        wake = list_empty(&map->async_list);

        if (ret != 0)
                map->async_ret = ret;

        spin_unlock(&map->async_lock);

        if (wake)
                wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&map->async_lock, flags);
        ret = list_empty(&map->async_list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        return ret;
}

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
        unsigned long flags;
        int ret;

        /* Nothing to do with no async support */
        if (!map->bus || !map->bus->async_write)
                return 0;

        trace_regmap_async_complete_start(map);

        wait_event(map->async_waitq, regmap_async_is_done(map));

        spin_lock_irqsave(&map->async_lock, flags);
        ret = map->async_ret;
        map->async_ret = 0;
        spin_unlock_irqrestore(&map->async_lock, flags);

        trace_regmap_async_complete_done(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch: Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
                          int num_regs)
{
        struct reg_sequence *p;
        int ret;
        bool bypass;

        if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
                      num_regs))
                return 0;

        p = krealloc(map->patch,
                     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
                     GFP_KERNEL);
        if (p) {
                memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
                map->patch = p;
                map->patch_regs += num_regs;
        } else {
                return -ENOMEM;
        }

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;

        map->cache_bypass = true;
        map->async = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->async = false;
        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        regmap_async_complete(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
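
/*
 * A minimal usage sketch (hypothetical fix-up values): the patch is
 * applied immediately, bypassing the cache, and re-applied whenever
 * the cache is synchronised back to the device.
 *
 *      static const struct reg_sequence foo_errata_patch[] = {
 *              { 0x7f, 0x01 },
 *              { 0x30, 0xa5 },
 *      };
 *
 *      ret = regmap_register_patch(map, foo_errata_patch,
 *                                  ARRAY_SIZE(foo_errata_patch));
 */
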
/*
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
        if (map->format.format_write)
                return -EINVAL;

        return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register(): Report the max register value
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
        return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride(): Report the register address stride
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
        return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
                     unsigned int *val)
{
        if (!map->format.parse_val)
                return -EINVAL;

        *val = map->format.parse_val(buf);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
        regmap_debugfs_initcall();

        return 0;
}
postcore_initcall(regmap_initcall);