/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}
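
/*
 * Example: the access checks above are driven entirely by the
 * regmap_config a driver supplies.  An illustrative sketch for a
 * hypothetical device ("foo", all names assumed) with registers
 * 0x00-0x3f of which 0x20-0x2f must never be touched:
 *
 *	static const struct regmap_range foo_rw_ranges[] = {
 *		regmap_reg_range(0x00, 0x1f),
 *		regmap_reg_range(0x30, 0x3f),
 *	};
 *
 *	static const struct regmap_access_table foo_rw_table = {
 *		.yes_ranges = foo_rw_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_rw_ranges),
 *	};
 *
 * With .wr_table/.rd_table in the regmap_config pointing at foo_rw_table,
 * regmap_writeable() and regmap_readable() reject accesses to 0x20-0x2f
 * via regmap_check_range_table().
 */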
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}
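
/*
 * Example: the format/parse helpers come in matched pairs, so a value
 * survives a round trip through the wire format unchanged.  For 16-bit
 * big-endian values, 0x1234 is laid out as { 0x12, 0x34 } and parsed
 * back to 0x1234 (a sketch; drivers never call these directly):
 *
 *	u8 buf[2];
 *
 *	regmap_format_16_be(buf, 0x1234, 0);	// buf = { 0x12, 0x34 }
 *	WARN_ON(regmap_parse_16_be(buf) != 0x1234);
 */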
static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
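
/*
 * Example: which pair of the lock helpers above a map uses is selected
 * in __regmap_init().  A config for an MMIO-style device can request the
 * spinlock variants, making the map usable from atomic context; all
 * other maps default to the mutex variants.  A sketch, field values
 * assumed:
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.fast_io = true,	// regmap_lock/unlock_spinlock
 *	};
 */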
static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
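
/*
 * Example: the lookup order implemented above is config, then the DT
 * properties "big-endian"/"little-endian"/"native-endian", then the bus
 * default, then big-endian.  A driver that knows its device is
 * little-endian can short-circuit all of that in the config (sketch,
 * values assumed):
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
 *	};
 */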
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}
	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;
	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}
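
/*
 * Example: drivers normally reach __devm_regmap_init() below through a
 * bus-specific wrapper such as devm_regmap_init_i2c().  A typical
 * probe() sketch (device specifics assumed):
 *
 *	static int foo_probe(struct i2c_client *i2c,
 *			     const struct i2c_device_id *id)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_i2c(i2c, &foo_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *		...
 *	}
 *
 * The devres entry added here calls regmap_exit() automatically on
 * driver detach, so probe() needs no explicit cleanup.
 */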
struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free(): Free a register field allocated using
 * devm_regmap_field_alloc.  Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per the device-driver
 * life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);
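
/*
 * Example: a reg_field describes one bitfield within a register.  Using
 * the REG_FIELD() helper from <linux/regmap.h> for a hypothetical mode
 * field in bits 3:2 of register 0x10 (names and values assumed):
 *
 *	static const struct reg_field foo_mode_field = REG_FIELD(0x10, 2, 3);
 *
 *	struct regmap_field *f;
 *
 *	f = regmap_field_alloc(map, foo_mode_field);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	regmap_field_write(f, 0x2);	// masking/shifting done internally
 *	regmap_field_free(f);
 */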
/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
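
/*
 * Example: dev_get_regmap() is the usual way for a child of an MFD to
 * borrow the register map its parent registered (sketch, assuming the
 * parent created the map with a NULL name):
 *
 *	static int foo_child_probe(struct platform_device *pdev)
 *	{
 *		struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *		if (!map)
 *			return -ENODEV;
 *		...
 *	}
 */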
/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
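
/*
 * Example: _regmap_select_page() above is only reached for registers
 * that fall inside a regmap_range_cfg.  A hypothetical device exposing
 * registers 0x100-0x4ff through a 0x100-register window at 0x40, paged
 * by a selector at register 0x3f, could be described like this (sketch,
 * all values assumed):
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.range_min = 0x100,
 *			.range_max = 0x4ff,
 *			.selector_reg = 0x3f,
 *			.selector_mask = 0x3,
 *			.selector_shift = 0,
 *			.window_start = 0x40,
 *			.window_len = 0x100,
 *		},
 *	};
 *
 * Virtual register 0x2c3 then maps to page (0x2c3 - 0x100) / 0x100 = 1,
 * offset 0xc3, so the selector is written with 1 and the access goes to
 * 0x40 + 0xc3 = 0x103.
 */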
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	u8 *u8 = map->work_buf;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + (i * map->reg_stride)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map, reg + (i * map->reg_stride),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	u8[0] |= map->write_flag_mask;

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}
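
/*
 * Example: a firmware download path can use the capability helpers above
 * to pick a safe transfer size before calling regmap_raw_write() (a
 * sketch; FOO_FW_BASE, data and len are assumed):
 *
 *	size_t max = regmap_get_raw_write_max(map);
 *
 *	if (!regmap_can_raw_write(map))
 *		return -EINVAL;
 *	if (max && len > max)
 *		len = max;	// or split the transfer into chunks
 *	return regmap_raw_write(map, FOO_FW_BASE, data, len);
 */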
static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
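
/*
 * Example: regmap_write() above and regmap_read() later in this file are
 * the bread-and-butter entry points.  A read/modify/write done by hand
 * looks like this sketch (register and bit names assumed), though
 * regmap_update_bits() is normally preferred since it holds the map's
 * lock across the whole cycle:
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = regmap_read(map, FOO_CTRL, &val);
 *	if (ret)
 *		return ret;
 *	return regmap_write(map, FOO_CTRL, val | FOO_CTRL_EN);
 */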
/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && val_len > map->max_raw_write)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_field_write(): Write a value to a single register field
 *
 * @field: Register field to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_write(struct regmap_field *field, unsigned int val)
{
	return regmap_update_bits(field->regmap, field->reg,
				  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_write);

/**
 * regmap_field_update_bits(): Perform a read/modify/write cycle
 * on the register field
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits(field->regmap, field->reg,
				  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits);

/**
 * regmap_fields_write(): Write a value to a single register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_write(struct regmap_field *field, unsigned int id,
			unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_update_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_write);

int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
			      unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_write_bits(field->regmap,
				 field->reg + (field->id_offset * id),
				 field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_force_write);
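
/*
 * Example: the regmap_fields_*() variants above address one field that
 * is replicated across several identical ports.  With id_offset set to
 * the per-port register stride, port N's copy lives at
 * reg + id_offset * N (sketch, all values assumed):
 *
 *	static const struct reg_field foo_en_field = {
 *		.reg = 0x04, .lsb = 0, .msb = 0,
 *		.id_size = 4,		// four ports
 *		.id_offset = 0x10,	// register block per port
 *	};
 *
 *	regmap_fields_write(f, 2, 1);	// enable port 2: register 0x24
 */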
/**
 * regmap_fields_update_bits(): Perform a read/modify/write cycle
 * on the register field
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
			      unsigned int mask, unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
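
/*
 * Example: regmap_bulk_write() below takes values in CPU-native format
 * and converts them in place for the wire, unlike regmap_raw_write()
 * which sends the buffer untouched.  Writing four consecutive 16-bit
 * coefficient registers (sketch, names and values assumed):
 *
 *	u16 coefs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, FOO_COEF_BASE, coefs,
 *				ARRAY_SIZE(coefs));
 */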
/*
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (map->bus && !map->format.parse_inplace)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we have a series
	 * of single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io.  It does not allow
	 * val_bytes of 3 for example.
	 * The second one is used for busses which do not have this limitation
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map, reg + (i * map->reg_stride),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes.  This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
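
/*
 * Example: a reg_sequence array feeds the multi-register write paths
 * above; delay_us entries force the sequence to be split exactly as
 * described in _regmap_range_multi_paged_reg_write().  A power-up
 * sequence sketch (registers and values assumed):
 *
 *	static const struct reg_sequence foo_powerup[] = {
 *		{ .reg = 0x00, .def = 0x01 },
 *		{ .reg = 0x01, .def = 0x80, .delay_us = 100 },
 *		{ .reg = 0x02, .def = 0x03 },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, foo_powerup,
 *				     ARRAY_SIZE(foo_powerup));
 */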

/*
 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
 * device but not the cache
 *
 * where the set of registers may be supplied in any order
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
                                    const struct reg_sequence *regs,
                                    int num_regs)
{
        int ret;
        bool bypass;

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;
        map->cache_bypass = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 *                           asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len)
{
        int ret;

        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_raw_write(map, reg, val, val_len);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
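
/*
 * Example: a minimal regmap_raw_write_async() usage sketch for a
 * firmware download; FOO_DSP_BASE and the firmware fields are
 * hypothetical, and fw->size must be a multiple of the value size.
 * The data buffer must stay valid until completion is confirmed:
 *
 *        ret = regmap_raw_write_async(map, FOO_DSP_BASE, fw->data, fw->size);
 *        if (ret != 0)
 *                return ret;
 *
 *        ret = regmap_async_complete(map);
 */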

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len)
{
        struct regmap_range_node *range;
        u8 *u8 = map->work_buf;
        int ret;

        WARN_ON(!map->bus);

        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range,
                                          val_len / map->format.val_bytes);
                if (ret != 0)
                        return ret;
        }

        map->format.format_reg(map->work_buf, reg, map->reg_shift);

        /*
         * Some buses or devices flag reads by setting the high bits in the
         * register address; since it's always the high bits for all
         * current formats we can do this here rather than in
         * formatting. This may break if we get interesting formats.
         */
        u8[0] |= map->read_flag_mask;

        trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

        ret = map->bus->read(map->bus_context, map->work_buf,
                             map->format.reg_bytes + map->format.pad_bytes,
                             val, val_len);

        trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

        return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val)
{
        struct regmap *map = context;

        return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val)
{
        int ret;
        struct regmap *map = context;

        if (!map->format.parse_val)
                return -EINVAL;

        ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
        if (ret == 0)
                *val = map->format.parse_val(map->work_buf);

        return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
                        unsigned int *val)
{
        int ret;
        void *context = _regmap_map_get_context(map);

        if (!map->cache_bypass) {
                ret = regcache_read(map, reg, val);
                if (ret == 0)
                        return 0;
        }

        if (map->cache_only)
                return -EBUSY;

        if (!regmap_readable(map, reg))
                return -EIO;

        ret = map->reg_read(context, reg, val);
        if (ret == 0) {
#ifdef LOG_DEVICE
                if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                        dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

                trace_regmap_reg_read(map, reg, *val);

                if (!map->cache_bypass)
                        regcache_write(map, reg, *val);
        }

        return ret;
}

/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
        int ret;

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_read(map, reg, val);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
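
/*
 * Example: a minimal regmap_read() usage sketch, such as an identity
 * check during probe; FOO_REG_CHIP_ID and FOO_CHIP_ID are hypothetical:
 *
 *        unsigned int id;
 *
 *        ret = regmap_read(map, FOO_REG_CHIP_ID, &id);
 *        if (ret != 0)
 *                return ret;
 *        if (id != FOO_CHIP_ID)
 *                return -ENODEV;
 */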

/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                    size_t val_len)
{
        size_t val_bytes = map->format.val_bytes;
        size_t val_count = val_len / val_bytes;
        unsigned int v;
        int ret, i;

        if (!map->bus)
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
        if (val_count == 0)
                return -EINVAL;

        map->lock(map->lock_arg);

        if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
            map->cache_type == REGCACHE_NONE) {
                if (!map->bus->read) {
                        ret = -ENOTSUPP;
                        goto out;
                }
                if (map->max_raw_read && map->max_raw_read < val_len) {
                        ret = -E2BIG;
                        goto out;
                }

                /* Physical block read if there's no cache involved */
                ret = _regmap_raw_read(map, reg, val, val_len);

        } else {
                /* Otherwise go word by word for the cache; should be low
                 * cost as we expect to hit the cache.
                 */
                for (i = 0; i < val_count; i++) {
                        ret = _regmap_read(map, reg + (i * map->reg_stride),
                                           &v);
                        if (ret != 0)
                                goto out;

                        map->format.format_val(val + (i * val_bytes), v, 0);
                }
        }

 out:
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
        int ret;
        unsigned int reg_val;

        ret = regmap_read(field->regmap, field->reg, &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
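
/*
 * Example: a minimal regmap_field_read() usage sketch; the field layout
 * is hypothetical (bits [6:4] of register 0x30). The field is allocated
 * once and then read back without any manual mask/shift handling:
 *
 *        static const struct reg_field status_field = REG_FIELD(0x30, 4, 6);
 *        struct regmap_field *f;
 *        unsigned int status;
 *
 *        f = devm_regmap_field_alloc(dev, map, status_field);
 *        if (IS_ERR(f))
 *                return PTR_ERR(f);
 *
 *        ret = regmap_field_read(f, &status);
 */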

/**
 * regmap_fields_read(): Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
                       unsigned int *val)
{
        int ret;
        unsigned int reg_val;

        if (id >= field->id_size)
                return -EINVAL;

        ret = regmap_read(field->regmap,
                          field->reg + (field->id_offset * id),
                          &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
                     size_t val_count)
{
        int ret, i;
        size_t val_bytes = map->format.val_bytes;
        bool vol = regmap_volatile_range(map, reg, val_count);

        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;

        if (map->bus && map->format.parse_inplace &&
            (vol || map->cache_type == REGCACHE_NONE)) {
                size_t total_size = val_bytes * val_count;

                if (!map->use_single_read &&
                    (!map->max_raw_read || map->max_raw_read > total_size)) {
                        ret = regmap_raw_read(map, reg, val,
                                              val_bytes * val_count);
                        if (ret != 0)
                                return ret;
                } else {
                        /*
                         * Some devices do not support bulk read or do not
                         * support large bulk reads; for them we have a
                         * series of read operations.
                         */
                        int chunk_stride = map->reg_stride;
                        size_t chunk_size = val_bytes;
                        size_t chunk_count = val_count;

                        if (!map->use_single_read) {
                                chunk_size = map->max_raw_read;
                                if (chunk_size % val_bytes)
                                        chunk_size -= chunk_size % val_bytes;
                                chunk_count = total_size / chunk_size;
                                chunk_stride *= chunk_size / val_bytes;
                        }

                        /* Read bytes that fit into a multiple of chunk_size */
                        for (i = 0; i < chunk_count; i++) {
                                ret = regmap_raw_read(map,
                                                      reg + (i * chunk_stride),
                                                      val + (i * chunk_size),
                                                      chunk_size);
                                if (ret != 0)
                                        return ret;
                        }

                        /* Read remaining bytes */
                        if (chunk_size * i < total_size) {
                                ret = regmap_raw_read(map,
                                                      reg + (i * chunk_stride),
                                                      val + (i * chunk_size),
                                                      total_size - i * chunk_size);
                                if (ret != 0)
                                        return ret;
                        }
                }

                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(val + i);
        } else {
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;
                        ret = regmap_read(map, reg + (i * map->reg_stride),
                                          &ival);
                        if (ret != 0)
                                return ret;

                        if (map->format.format_val) {
                                map->format.format_val(val + (i * val_bytes),
                                                       ival, 0);
                        } else {
                                /* Devices providing read and write
                                 * operations can use the bulk I/O
                                 * functions if they define val_bytes;
                                 * we assume that the values are
                                 * native endian.
                                 */
#ifdef CONFIG_64BIT
                                u64 *u64 = val;
#endif
                                u32 *u32 = val;
                                u16 *u16 = val;
                                u8 *u8 = val;

                                switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
                                case 8:
                                        u64[i] = ival;
                                        break;
#endif
                                case 4:
                                        u32[i] = ival;
                                        break;
                                case 2:
                                        u16[i] = ival;
                                        break;
                                case 1:
                                        u8[i] = ival;
                                        break;
                                default:
                                        return -EINVAL;
                                }
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
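
/*
 * Example: a minimal regmap_bulk_read() usage sketch; the base register
 * 0x40 and the sample count are hypothetical. The buffer is sized in
 * the device's native register width and the count is in registers,
 * not bytes:
 *
 *        u16 samples[8];
 *
 *        ret = regmap_bulk_read(map, 0x40, samples, ARRAY_SIZE(samples));
 */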

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change, bool force_write)
{
        int ret;
        unsigned int tmp, orig;

        if (change)
                *change = false;

        if (regmap_volatile(map, reg) && map->reg_update_bits) {
                ret = map->reg_update_bits(map->bus_context, reg, mask, val);
                if (ret == 0 && change)
                        *change = true;
        } else {
                ret = _regmap_read(map, reg, &orig);
                if (ret != 0)
                        return ret;

                tmp = orig & ~mask;
                tmp |= val & mask;

                if (force_write || (tmp != orig)) {
                        ret = _regmap_write(map, reg, tmp);
                        if (ret == 0 && change)
                                *change = true;
                }
        }

        return ret;
}

/**
 * regmap_update_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits(struct regmap *map, unsigned int reg,
                       unsigned int mask, unsigned int val)
{
        int ret;

        map->lock(map->lock_arg);
        ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
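
/*
 * Example: a minimal regmap_update_bits() usage sketch; register 0x12
 * and its bit layout are hypothetical. Only the bits covered by the
 * mask are modified; here bit 0 is set and bit 1 is cleared in one
 * read/modify/write cycle:
 *
 *        ret = regmap_update_bits(map, 0x12, 0x03, 0x01);
 */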

/**
 * regmap_write_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Unlike regmap_update_bits() the write is performed even if the
 * masked value is unchanged.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_write_bits(struct regmap *map, unsigned int reg,
                      unsigned int mask, unsigned int val)
{
        int ret;

        map->lock(map->lock_arg);
        ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_bits);

/**
 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
 *                           map asynchronously
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val)
{
        int ret;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_update_bits(map, reg, mask, val, NULL, false);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_async);

/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val,
                             bool *change)
{
        int ret;

        map->lock(map->lock_arg);
        ret = _regmap_update_bits(map, reg, mask, val, change, false);
        map->unlock(map->lock_arg);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);

/**
 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
 *                                 register map asynchronously and report if
 *                                 updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
                                   unsigned int mask, unsigned int val,
                                   bool *change)
{
        int ret;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_update_bits(map, reg, mask, val, change, false);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
        struct regmap *map = async->map;
        bool wake;

        trace_regmap_async_io_complete(map);

        spin_lock(&map->async_lock);
        list_move(&async->list, &map->async_free);
        wake = list_empty(&map->async_list);

        if (ret != 0)
                map->async_ret = ret;

        spin_unlock(&map->async_lock);

        if (wake)
                wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&map->async_lock, flags);
        ret = list_empty(&map->async_list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        return ret;
}

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
        unsigned long flags;
        int ret;

        /* Nothing to do with no async support */
        if (!map->bus || !map->bus->async_write)
                return 0;

        trace_regmap_async_complete_start(map);

        wait_event(map->async_waitq, regmap_async_is_done(map));

        spin_lock_irqsave(&map->async_lock, flags);
        ret = map->async_ret;
        map->async_ret = 0;
        spin_unlock_irqrestore(&map->async_lock, flags);

        trace_regmap_async_complete_done(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
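
/*
 * Example: a minimal regmap_update_bits_check() usage sketch; the
 * register and mask are hypothetical. @change reports whether the value
 * actually changed, which is useful when further action is only needed
 * on a real transition:
 *
 *        bool changed;
 *
 *        ret = regmap_update_bits_check(map, 0x12, 0x10, 0x10, &changed);
 *        if (ret == 0 && changed)
 *                dev_dbg(dev, "enable bit was newly set\n");
 */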

/**
 * regmap_register_patch: Register and apply register updates to be applied
 *                        on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
                          int num_regs)
{
        struct reg_sequence *p;
        int ret;
        bool bypass;

        if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
                      num_regs))
                return 0;

        p = krealloc(map->patch,
                     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
                     GFP_KERNEL);
        if (p) {
                memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
                map->patch = p;
                map->patch_regs += num_regs;
        } else {
                return -ENOMEM;
        }

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;

        map->cache_bypass = true;
        map->async = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->async = false;
        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        regmap_async_complete(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
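
/*
 * Example: a minimal regmap_register_patch() usage sketch; the register
 * addresses and values stand in for hypothetical vendor corrections.
 * The patch is applied immediately and re-applied whenever the cache is
 * synchronised with the device:
 *
 *        static const struct reg_sequence foo_errata[] = {
 *                { 0x71, 0x0004 },
 *                { 0x72, 0x0d03 },
 *        };
 *
 *        ret = regmap_register_patch(map, foo_errata, ARRAY_SIZE(foo_errata));
 */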

/*
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
        if (map->format.format_write)
                return -EINVAL;

        return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register(): Report the max register value
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
        return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride(): Report the register address stride
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
        return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
                     unsigned int *val)
{
        if (!map->format.parse_val)
                return -EINVAL;

        *val = map->format.parse_val(buf);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
        regmap_debugfs_initcall();

        return 0;
}
postcore_initcall(regmap_initcall);