1 /* 2 * Register map access API 3 * 4 * Copyright 2011 Wolfson Microelectronics plc 5 * 6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/device.h> 14 #include <linux/slab.h> 15 #include <linux/export.h> 16 #include <linux/mutex.h> 17 #include <linux/err.h> 18 #include <linux/of.h> 19 #include <linux/rbtree.h> 20 #include <linux/sched.h> 21 #include <linux/delay.h> 22 23 #define CREATE_TRACE_POINTS 24 #include "trace.h" 25 26 #include "internal.h" 27 28 /* 29 * Sometimes for failures during very early init the trace 30 * infrastructure isn't available early enough to be used. For this 31 * sort of problem defining LOG_DEVICE will add printks for basic 32 * register I/O on a specific device. 33 */ 34 #undef LOG_DEVICE 35 36 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 37 unsigned int mask, unsigned int val, 38 bool *change, bool force_write); 39 40 static int _regmap_bus_reg_read(void *context, unsigned int reg, 41 unsigned int *val); 42 static int _regmap_bus_read(void *context, unsigned int reg, 43 unsigned int *val); 44 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 45 unsigned int val); 46 static int _regmap_bus_reg_write(void *context, unsigned int reg, 47 unsigned int val); 48 static int _regmap_bus_raw_write(void *context, unsigned int reg, 49 unsigned int val); 50 51 bool regmap_reg_in_ranges(unsigned int reg, 52 const struct regmap_range *ranges, 53 unsigned int nranges) 54 { 55 const struct regmap_range *r; 56 int i; 57 58 for (i = 0, r = ranges; i < nranges; i++, r++) 59 if (regmap_reg_in_range(reg, r)) 60 return true; 61 return false; 62 } 63 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges); 64 65 bool regmap_check_range_table(struct regmap *map, unsigned int reg, 66 const struct regmap_access_table *table) 67 { 68 /* Check "no ranges" first */ 69 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) 70 return false; 71 72 /* In case zero "yes ranges" are supplied, any reg is OK */ 73 if (!table->n_yes_ranges) 74 return true; 75 76 return regmap_reg_in_ranges(reg, table->yes_ranges, 77 table->n_yes_ranges); 78 } 79 EXPORT_SYMBOL_GPL(regmap_check_range_table); 80 81 bool regmap_writeable(struct regmap *map, unsigned int reg) 82 { 83 if (map->max_register && reg > map->max_register) 84 return false; 85 86 if (map->writeable_reg) 87 return map->writeable_reg(map->dev, reg); 88 89 if (map->wr_table) 90 return regmap_check_range_table(map, reg, map->wr_table); 91 92 return true; 93 } 94 95 bool regmap_readable(struct regmap *map, unsigned int reg) 96 { 97 if (!map->reg_read) 98 return false; 99 100 if (map->max_register && reg > map->max_register) 101 return false; 102 103 if (map->format.format_write) 104 return false; 105 106 if (map->readable_reg) 107 return map->readable_reg(map->dev, reg); 108 109 if (map->rd_table) 110 return regmap_check_range_table(map, reg, map->rd_table); 111 112 return true; 113 } 114 115 bool regmap_volatile(struct regmap *map, unsigned int reg) 116 { 117 if (!map->format.format_write && !regmap_readable(map, reg)) 118 return false; 119 120 if (map->volatile_reg) 121 return map->volatile_reg(map->dev, reg); 122 123 if (map->volatile_table) 124 return regmap_check_range_table(map, reg, map->volatile_table); 125 126 if (map->cache_ops) 127 return false; 128 else 129 return true; 130 } 131 132 bool regmap_precious(struct regmap *map, unsigned int reg) 133 { 134 if (!regmap_readable(map, reg)) 135 return false; 136 137 if (map->precious_reg) 138 return map->precious_reg(map->dev, reg); 139 140 if (map->precious_table) 141 return regmap_check_range_table(map, reg, map->precious_table); 142 143 return false; 144 } 145 146 static bool regmap_volatile_range(struct regmap *map, unsigned int reg, 147 size_t num) 148 { 149 unsigned int i; 150 151 for (i = 0; i < num; i++) 152 if (!regmap_volatile(map, reg + i)) 153 return false; 154 155 return true; 156 } 157 158 static void regmap_format_2_6_write(struct regmap *map, 159 unsigned int reg, unsigned int val) 160 { 161 u8 *out = map->work_buf; 162 163 *out = (reg << 6) | val; 164 } 165 166 static void regmap_format_4_12_write(struct regmap *map, 167 unsigned int reg, unsigned int val) 168 { 169 __be16 *out = map->work_buf; 170 *out = cpu_to_be16((reg << 12) | val); 171 } 172 173 static void regmap_format_7_9_write(struct regmap *map, 174 unsigned int reg, unsigned int val) 175 { 176 __be16 *out = map->work_buf; 177 *out = cpu_to_be16((reg << 9) | val); 178 } 179 180 static void regmap_format_10_14_write(struct regmap *map, 181 unsigned int reg, unsigned int val) 182 { 183 u8 *out = map->work_buf; 184 185 out[2] = val; 186 out[1] = (val >> 8) | (reg << 6); 187 out[0] = reg >> 2; 188 } 189 190 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) 191 { 192 u8 *b = buf; 193 194 b[0] = val << shift; 195 } 196 197 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) 198 { 199 __be16 *b = buf; 200 201 b[0] = cpu_to_be16(val << shift); 202 } 203 204 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) 205 { 206 __le16 *b = buf; 207 208 b[0] = cpu_to_le16(val << shift); 209 } 210 211 static void regmap_format_16_native(void *buf, unsigned int val, 212 unsigned int shift) 213 { 214 *(u16 *)buf = val << shift; 215 } 216 217 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) 218 { 219 u8 *b = buf; 220 221 val <<= shift; 222 223 b[0] = val >> 16; 224 b[1] = val >> 8; 225 b[2] = val; 226 } 227 228 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) 229 { 230 __be32 *b = buf; 231 232 b[0] = cpu_to_be32(val << shift); 233 } 234 235 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) 236 { 237 __le32 *b = buf; 238 239 b[0] = cpu_to_le32(val << shift); 240 } 241 242 static void regmap_format_32_native(void *buf, unsigned int val, 243 unsigned int shift) 244 { 245 *(u32 *)buf = val << shift; 246 } 247 248 static void regmap_parse_inplace_noop(void *buf) 249 { 250 } 251 252 static unsigned int regmap_parse_8(const void *buf) 253 { 254 const u8 *b = buf; 255 256 return b[0]; 257 } 258 259 static unsigned int regmap_parse_16_be(const void *buf) 260 { 261 const __be16 *b = buf; 262 263 return be16_to_cpu(b[0]); 264 } 265 266 static unsigned int regmap_parse_16_le(const void *buf) 267 { 268 const __le16 *b = buf; 269 270 return le16_to_cpu(b[0]); 271 } 272 273 static void regmap_parse_16_be_inplace(void *buf) 274 { 275 __be16 *b = buf; 276 277 b[0] = be16_to_cpu(b[0]); 278 } 279 280 static void regmap_parse_16_le_inplace(void *buf) 281 { 282 __le16 *b = buf; 283 284 b[0] = le16_to_cpu(b[0]); 285 } 286 287 static unsigned int regmap_parse_16_native(const void *buf) 288 { 289 return *(u16 *)buf; 290 } 291 292 static unsigned int regmap_parse_24(const void *buf) 293 { 294 const u8 *b = buf; 295 unsigned int ret = b[2]; 296 ret |= ((unsigned int)b[1]) << 8; 297 ret |= ((unsigned int)b[0]) << 16; 298 299 return ret; 300 } 301 302 static unsigned int regmap_parse_32_be(const void *buf) 303 { 304 const __be32 *b = buf; 305 306 return be32_to_cpu(b[0]); 307 } 308 309 static unsigned int regmap_parse_32_le(const void *buf) 310 { 311 const __le32 *b = buf; 312 313 return le32_to_cpu(b[0]); 314 } 315 316 static void regmap_parse_32_be_inplace(void *buf) 317 { 318 __be32 *b = buf; 319 320 b[0] = be32_to_cpu(b[0]); 321 } 322 323 static void regmap_parse_32_le_inplace(void *buf) 324 { 325 __le32 *b = buf; 326 327 b[0] = le32_to_cpu(b[0]); 328 } 329 330 static unsigned int regmap_parse_32_native(const void *buf) 331 { 332 return *(u32 *)buf; 333 } 334 335 static void regmap_lock_mutex(void *__map) 336 { 337 struct regmap *map = __map; 338 mutex_lock(&map->mutex); 339 } 340 341 static void regmap_unlock_mutex(void *__map) 342 { 343 struct regmap *map = __map; 344 mutex_unlock(&map->mutex); 345 } 346 347 static void regmap_lock_spinlock(void *__map) 348 __acquires(&map->spinlock) 349 { 350 struct regmap *map = __map; 351 unsigned long flags; 352 353 spin_lock_irqsave(&map->spinlock, flags); 354 map->spinlock_flags = flags; 355 } 356 357 static void regmap_unlock_spinlock(void *__map) 358 __releases(&map->spinlock) 359 { 360 struct regmap *map = __map; 361 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); 362 } 363 364 static void dev_get_regmap_release(struct device *dev, void *res) 365 { 366 /* 367 * We don't actually have anything to do here; the goal here 368 * is not to manage the regmap but to provide a simple way to 369 * get the regmap back given a struct device. 370 */ 371 } 372 373 static bool _regmap_range_add(struct regmap *map, 374 struct regmap_range_node *data) 375 { 376 struct rb_root *root = &map->range_tree; 377 struct rb_node **new = &(root->rb_node), *parent = NULL; 378 379 while (*new) { 380 struct regmap_range_node *this = 381 container_of(*new, struct regmap_range_node, node); 382 383 parent = *new; 384 if (data->range_max < this->range_min) 385 new = &((*new)->rb_left); 386 else if (data->range_min > this->range_max) 387 new = &((*new)->rb_right); 388 else 389 return false; 390 } 391 392 rb_link_node(&data->node, parent, new); 393 rb_insert_color(&data->node, root); 394 395 return true; 396 } 397 398 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, 399 unsigned int reg) 400 { 401 struct rb_node *node = map->range_tree.rb_node; 402 403 while (node) { 404 struct regmap_range_node *this = 405 container_of(node, struct regmap_range_node, node); 406 407 if (reg < this->range_min) 408 node = node->rb_left; 409 else if (reg > this->range_max) 410 node = node->rb_right; 411 else 412 return this; 413 } 414 415 return NULL; 416 } 417 418 static void regmap_range_exit(struct regmap *map) 419 { 420 struct rb_node *next; 421 struct regmap_range_node *range_node; 422 423 next = rb_first(&map->range_tree); 424 while (next) { 425 range_node = rb_entry(next, struct regmap_range_node, node); 426 next = rb_next(&range_node->node); 427 rb_erase(&range_node->node, &map->range_tree); 428 kfree(range_node); 429 } 430 431 kfree(map->selector_work_buf); 432 } 433 434 int regmap_attach_dev(struct device *dev, struct regmap *map, 435 const struct regmap_config *config) 436 { 437 struct regmap **m; 438 439 map->dev = dev; 440 441 regmap_debugfs_init(map, config->name); 442 443 /* Add a devres resource for dev_get_regmap() */ 444 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 445 if (!m) { 446 regmap_debugfs_exit(map); 447 return -ENOMEM; 448 } 449 *m = map; 450 devres_add(dev, m); 451 452 return 0; 453 } 454 EXPORT_SYMBOL_GPL(regmap_attach_dev); 455 456 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, 457 const struct regmap_config *config) 458 { 459 enum regmap_endian endian; 460 461 /* Retrieve the endianness specification from the regmap config */ 462 endian = config->reg_format_endian; 463 464 /* If the regmap config specified a non-default value, use that */ 465 if (endian != REGMAP_ENDIAN_DEFAULT) 466 return endian; 467 468 /* Retrieve the endianness specification from the bus config */ 469 if (bus && bus->reg_format_endian_default) 470 endian = bus->reg_format_endian_default; 471 472 /* If the bus specified a non-default value, use that */ 473 if (endian != REGMAP_ENDIAN_DEFAULT) 474 return endian; 475 476 /* Use this if no other value was found */ 477 return REGMAP_ENDIAN_BIG; 478 } 479 480 enum regmap_endian regmap_get_val_endian(struct device *dev, 481 const struct regmap_bus *bus, 482 const struct regmap_config *config) 483 { 484 struct device_node *np; 485 enum regmap_endian endian; 486 487 /* Retrieve the endianness specification from the regmap config */ 488 endian = config->val_format_endian; 489 490 /* If the regmap config specified a non-default value, use that */ 491 if (endian != REGMAP_ENDIAN_DEFAULT) 492 return endian; 493 494 /* If the dev and dev->of_node exist try to get endianness from DT */ 495 if (dev && dev->of_node) { 496 np = dev->of_node; 497 498 /* Parse the device's DT node for an endianness specification */ 499 if (of_property_read_bool(np, "big-endian")) 500 endian = REGMAP_ENDIAN_BIG; 501 else if (of_property_read_bool(np, "little-endian")) 502 endian = REGMAP_ENDIAN_LITTLE; 503 504 /* If the endianness was specified in DT, use that */ 505 if (endian != REGMAP_ENDIAN_DEFAULT) 506 return endian; 507 } 508 509 /* Retrieve the endianness specification from the bus config */ 510 if (bus && bus->val_format_endian_default) 511 endian = bus->val_format_endian_default; 512 513 /* If the bus specified a non-default value, use that */ 514 if (endian != REGMAP_ENDIAN_DEFAULT) 515 return endian; 516 517 /* Use this if no other value was found */ 518 return REGMAP_ENDIAN_BIG; 519 } 520 EXPORT_SYMBOL_GPL(regmap_get_val_endian); 521 522 struct regmap *__regmap_init(struct device *dev, 523 const struct regmap_bus *bus, 524 void *bus_context, 525 const struct regmap_config *config, 526 struct lock_class_key *lock_key, 527 const char *lock_name) 528 { 529 struct regmap *map; 530 int ret = -EINVAL; 531 enum regmap_endian reg_endian, val_endian; 532 int i, j; 533 534 if (!config) 535 goto err; 536 537 map = kzalloc(sizeof(*map), GFP_KERNEL); 538 if (map == NULL) { 539 ret = -ENOMEM; 540 goto err; 541 } 542 543 if (config->lock && config->unlock) { 544 map->lock = config->lock; 545 map->unlock = config->unlock; 546 map->lock_arg = config->lock_arg; 547 } else { 548 if ((bus && bus->fast_io) || 549 config->fast_io) { 550 spin_lock_init(&map->spinlock); 551 map->lock = regmap_lock_spinlock; 552 map->unlock = regmap_unlock_spinlock; 553 lockdep_set_class_and_name(&map->spinlock, 554 lock_key, lock_name); 555 } else { 556 mutex_init(&map->mutex); 557 map->lock = regmap_lock_mutex; 558 map->unlock = regmap_unlock_mutex; 559 lockdep_set_class_and_name(&map->mutex, 560 lock_key, lock_name); 561 } 562 map->lock_arg = map; 563 } 564 565 /* 566 * When we write in fast-paths with regmap_bulk_write() don't allocate 567 * scratch buffers with sleeping allocations. 568 */ 569 if ((bus && bus->fast_io) || config->fast_io) 570 map->alloc_flags = GFP_ATOMIC; 571 else 572 map->alloc_flags = GFP_KERNEL; 573 574 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 575 map->format.pad_bytes = config->pad_bits / 8; 576 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 577 map->format.buf_size = DIV_ROUND_UP(config->reg_bits + 578 config->val_bits + config->pad_bits, 8); 579 map->reg_shift = config->pad_bits % 8; 580 if (config->reg_stride) 581 map->reg_stride = config->reg_stride; 582 else 583 map->reg_stride = 1; 584 map->use_single_read = config->use_single_rw || !bus || !bus->read; 585 map->use_single_write = config->use_single_rw || !bus || !bus->write; 586 map->can_multi_write = config->can_multi_write && bus && bus->write; 587 if (bus) { 588 map->max_raw_read = bus->max_raw_read; 589 map->max_raw_write = bus->max_raw_write; 590 } 591 map->dev = dev; 592 map->bus = bus; 593 map->bus_context = bus_context; 594 map->max_register = config->max_register; 595 map->wr_table = config->wr_table; 596 map->rd_table = config->rd_table; 597 map->volatile_table = config->volatile_table; 598 map->precious_table = config->precious_table; 599 map->writeable_reg = config->writeable_reg; 600 map->readable_reg = config->readable_reg; 601 map->volatile_reg = config->volatile_reg; 602 map->precious_reg = config->precious_reg; 603 map->cache_type = config->cache_type; 604 map->name = config->name; 605 606 spin_lock_init(&map->async_lock); 607 INIT_LIST_HEAD(&map->async_list); 608 INIT_LIST_HEAD(&map->async_free); 609 init_waitqueue_head(&map->async_waitq); 610 611 if (config->read_flag_mask || config->write_flag_mask) { 612 map->read_flag_mask = config->read_flag_mask; 613 map->write_flag_mask = config->write_flag_mask; 614 } else if (bus) { 615 map->read_flag_mask = bus->read_flag_mask; 616 } 617 618 if (!bus) { 619 map->reg_read = config->reg_read; 620 map->reg_write = config->reg_write; 621 622 map->defer_caching = false; 623 goto skip_format_initialization; 624 } else if (!bus->read || !bus->write) { 625 map->reg_read = _regmap_bus_reg_read; 626 map->reg_write = _regmap_bus_reg_write; 627 628 map->defer_caching = false; 629 goto skip_format_initialization; 630 } else { 631 map->reg_read = _regmap_bus_read; 632 map->reg_update_bits = bus->reg_update_bits; 633 } 634 635 reg_endian = regmap_get_reg_endian(bus, config); 636 val_endian = regmap_get_val_endian(dev, bus, config); 637 638 switch (config->reg_bits + map->reg_shift) { 639 case 2: 640 switch (config->val_bits) { 641 case 6: 642 map->format.format_write = regmap_format_2_6_write; 643 break; 644 default: 645 goto err_map; 646 } 647 break; 648 649 case 4: 650 switch (config->val_bits) { 651 case 12: 652 map->format.format_write = regmap_format_4_12_write; 653 break; 654 default: 655 goto err_map; 656 } 657 break; 658 659 case 7: 660 switch (config->val_bits) { 661 case 9: 662 map->format.format_write = regmap_format_7_9_write; 663 break; 664 default: 665 goto err_map; 666 } 667 break; 668 669 case 10: 670 switch (config->val_bits) { 671 case 14: 672 map->format.format_write = regmap_format_10_14_write; 673 break; 674 default: 675 goto err_map; 676 } 677 break; 678 679 case 8: 680 map->format.format_reg = regmap_format_8; 681 break; 682 683 case 16: 684 switch (reg_endian) { 685 case REGMAP_ENDIAN_BIG: 686 map->format.format_reg = regmap_format_16_be; 687 break; 688 case REGMAP_ENDIAN_NATIVE: 689 map->format.format_reg = regmap_format_16_native; 690 break; 691 default: 692 goto err_map; 693 } 694 break; 695 696 case 24: 697 if (reg_endian != REGMAP_ENDIAN_BIG) 698 goto err_map; 699 map->format.format_reg = regmap_format_24; 700 break; 701 702 case 32: 703 switch (reg_endian) { 704 case REGMAP_ENDIAN_BIG: 705 map->format.format_reg = regmap_format_32_be; 706 break; 707 case REGMAP_ENDIAN_NATIVE: 708 map->format.format_reg = regmap_format_32_native; 709 break; 710 default: 711 goto err_map; 712 } 713 break; 714 715 default: 716 goto err_map; 717 } 718 719 if (val_endian == REGMAP_ENDIAN_NATIVE) 720 map->format.parse_inplace = regmap_parse_inplace_noop; 721 722 switch (config->val_bits) { 723 case 8: 724 map->format.format_val = regmap_format_8; 725 map->format.parse_val = regmap_parse_8; 726 map->format.parse_inplace = regmap_parse_inplace_noop; 727 break; 728 case 16: 729 switch (val_endian) { 730 case REGMAP_ENDIAN_BIG: 731 map->format.format_val = regmap_format_16_be; 732 map->format.parse_val = regmap_parse_16_be; 733 map->format.parse_inplace = regmap_parse_16_be_inplace; 734 break; 735 case REGMAP_ENDIAN_LITTLE: 736 map->format.format_val = regmap_format_16_le; 737 map->format.parse_val = regmap_parse_16_le; 738 map->format.parse_inplace = regmap_parse_16_le_inplace; 739 break; 740 case REGMAP_ENDIAN_NATIVE: 741 map->format.format_val = regmap_format_16_native; 742 map->format.parse_val = regmap_parse_16_native; 743 break; 744 default: 745 goto err_map; 746 } 747 break; 748 case 24: 749 if (val_endian != REGMAP_ENDIAN_BIG) 750 goto err_map; 751 map->format.format_val = regmap_format_24; 752 map->format.parse_val = regmap_parse_24; 753 break; 754 case 32: 755 switch (val_endian) { 756 case REGMAP_ENDIAN_BIG: 757 map->format.format_val = regmap_format_32_be; 758 map->format.parse_val = regmap_parse_32_be; 759 map->format.parse_inplace = regmap_parse_32_be_inplace; 760 break; 761 case REGMAP_ENDIAN_LITTLE: 762 map->format.format_val = regmap_format_32_le; 763 map->format.parse_val = regmap_parse_32_le; 764 map->format.parse_inplace = regmap_parse_32_le_inplace; 765 break; 766 case REGMAP_ENDIAN_NATIVE: 767 map->format.format_val = regmap_format_32_native; 768 map->format.parse_val = regmap_parse_32_native; 769 break; 770 default: 771 goto err_map; 772 } 773 break; 774 } 775 776 if (map->format.format_write) { 777 if ((reg_endian != REGMAP_ENDIAN_BIG) || 778 (val_endian != REGMAP_ENDIAN_BIG)) 779 goto err_map; 780 map->use_single_write = true; 781 } 782 783 if (!map->format.format_write && 784 !(map->format.format_reg && map->format.format_val)) 785 goto err_map; 786 787 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); 788 if (map->work_buf == NULL) { 789 ret = -ENOMEM; 790 goto err_map; 791 } 792 793 if (map->format.format_write) { 794 map->defer_caching = false; 795 map->reg_write = _regmap_bus_formatted_write; 796 } else if (map->format.format_val) { 797 map->defer_caching = true; 798 map->reg_write = _regmap_bus_raw_write; 799 } 800 801 skip_format_initialization: 802 803 map->range_tree = RB_ROOT; 804 for (i = 0; i < config->num_ranges; i++) { 805 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 806 struct regmap_range_node *new; 807 808 /* Sanity check */ 809 if (range_cfg->range_max < range_cfg->range_min) { 810 dev_err(map->dev, "Invalid range %d: %d < %d\n", i, 811 range_cfg->range_max, range_cfg->range_min); 812 goto err_range; 813 } 814 815 if (range_cfg->range_max > map->max_register) { 816 dev_err(map->dev, "Invalid range %d: %d > %d\n", i, 817 range_cfg->range_max, map->max_register); 818 goto err_range; 819 } 820 821 if (range_cfg->selector_reg > map->max_register) { 822 dev_err(map->dev, 823 "Invalid range %d: selector out of map\n", i); 824 goto err_range; 825 } 826 827 if (range_cfg->window_len == 0) { 828 dev_err(map->dev, "Invalid range %d: window_len 0\n", 829 i); 830 goto err_range; 831 } 832 833 /* Make sure, that this register range has no selector 834 or data window within its boundary */ 835 for (j = 0; j < config->num_ranges; j++) { 836 unsigned sel_reg = config->ranges[j].selector_reg; 837 unsigned win_min = config->ranges[j].window_start; 838 unsigned win_max = win_min + 839 config->ranges[j].window_len - 1; 840 841 /* Allow data window inside its own virtual range */ 842 if (j == i) 843 continue; 844 845 if (range_cfg->range_min <= sel_reg && 846 sel_reg <= range_cfg->range_max) { 847 dev_err(map->dev, 848 "Range %d: selector for %d in window\n", 849 i, j); 850 goto err_range; 851 } 852 853 if (!(win_max < range_cfg->range_min || 854 win_min > range_cfg->range_max)) { 855 dev_err(map->dev, 856 "Range %d: window for %d in window\n", 857 i, j); 858 goto err_range; 859 } 860 } 861 862 new = kzalloc(sizeof(*new), GFP_KERNEL); 863 if (new == NULL) { 864 ret = -ENOMEM; 865 goto err_range; 866 } 867 868 new->map = map; 869 new->name = range_cfg->name; 870 new->range_min = range_cfg->range_min; 871 new->range_max = range_cfg->range_max; 872 new->selector_reg = range_cfg->selector_reg; 873 new->selector_mask = range_cfg->selector_mask; 874 new->selector_shift = range_cfg->selector_shift; 875 new->window_start = range_cfg->window_start; 876 new->window_len = range_cfg->window_len; 877 878 if (!_regmap_range_add(map, new)) { 879 dev_err(map->dev, "Failed to add range %d\n", i); 880 kfree(new); 881 goto err_range; 882 } 883 884 if (map->selector_work_buf == NULL) { 885 map->selector_work_buf = 886 kzalloc(map->format.buf_size, GFP_KERNEL); 887 if (map->selector_work_buf == NULL) { 888 ret = -ENOMEM; 889 goto err_range; 890 } 891 } 892 } 893 894 ret = regcache_init(map, config); 895 if (ret != 0) 896 goto err_range; 897 898 if (dev) { 899 ret = regmap_attach_dev(dev, map, config); 900 if (ret != 0) 901 goto err_regcache; 902 } 903 904 return map; 905 906 err_regcache: 907 regcache_exit(map); 908 err_range: 909 regmap_range_exit(map); 910 kfree(map->work_buf); 911 err_map: 912 kfree(map); 913 err: 914 return ERR_PTR(ret); 915 } 916 EXPORT_SYMBOL_GPL(__regmap_init); 917 918 static void devm_regmap_release(struct device *dev, void *res) 919 { 920 regmap_exit(*(struct regmap **)res); 921 } 922 923 struct regmap *__devm_regmap_init(struct device *dev, 924 const struct regmap_bus *bus, 925 void *bus_context, 926 const struct regmap_config *config, 927 struct lock_class_key *lock_key, 928 const char *lock_name) 929 { 930 struct regmap **ptr, *regmap; 931 932 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); 933 if (!ptr) 934 return ERR_PTR(-ENOMEM); 935 936 regmap = __regmap_init(dev, bus, bus_context, config, 937 lock_key, lock_name); 938 if (!IS_ERR(regmap)) { 939 *ptr = regmap; 940 devres_add(dev, ptr); 941 } else { 942 devres_free(ptr); 943 } 944 945 return regmap; 946 } 947 EXPORT_SYMBOL_GPL(__devm_regmap_init); 948 949 static void regmap_field_init(struct regmap_field *rm_field, 950 struct regmap *regmap, struct reg_field reg_field) 951 { 952 rm_field->regmap = regmap; 953 rm_field->reg = reg_field.reg; 954 rm_field->shift = reg_field.lsb; 955 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); 956 rm_field->id_size = reg_field.id_size; 957 rm_field->id_offset = reg_field.id_offset; 958 } 959 960 /** 961 * devm_regmap_field_alloc(): Allocate and initialise a register field 962 * in a register map. 963 * 964 * @dev: Device that will be interacted with 965 * @regmap: regmap bank in which this register field is located. 966 * @reg_field: Register field with in the bank. 967 * 968 * The return value will be an ERR_PTR() on error or a valid pointer 969 * to a struct regmap_field. The regmap_field will be automatically freed 970 * by the device management code. 971 */ 972 struct regmap_field *devm_regmap_field_alloc(struct device *dev, 973 struct regmap *regmap, struct reg_field reg_field) 974 { 975 struct regmap_field *rm_field = devm_kzalloc(dev, 976 sizeof(*rm_field), GFP_KERNEL); 977 if (!rm_field) 978 return ERR_PTR(-ENOMEM); 979 980 regmap_field_init(rm_field, regmap, reg_field); 981 982 return rm_field; 983 984 } 985 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc); 986 987 /** 988 * devm_regmap_field_free(): Free register field allocated using 989 * devm_regmap_field_alloc. Usally drivers need not call this function, 990 * as the memory allocated via devm will be freed as per device-driver 991 * life-cyle. 992 * 993 * @dev: Device that will be interacted with 994 * @field: regmap field which should be freed. 995 */ 996 void devm_regmap_field_free(struct device *dev, 997 struct regmap_field *field) 998 { 999 devm_kfree(dev, field); 1000 } 1001 EXPORT_SYMBOL_GPL(devm_regmap_field_free); 1002 1003 /** 1004 * regmap_field_alloc(): Allocate and initialise a register field 1005 * in a register map. 1006 * 1007 * @regmap: regmap bank in which this register field is located. 1008 * @reg_field: Register field with in the bank. 1009 * 1010 * The return value will be an ERR_PTR() on error or a valid pointer 1011 * to a struct regmap_field. The regmap_field should be freed by the 1012 * user once its finished working with it using regmap_field_free(). 1013 */ 1014 struct regmap_field *regmap_field_alloc(struct regmap *regmap, 1015 struct reg_field reg_field) 1016 { 1017 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL); 1018 1019 if (!rm_field) 1020 return ERR_PTR(-ENOMEM); 1021 1022 regmap_field_init(rm_field, regmap, reg_field); 1023 1024 return rm_field; 1025 } 1026 EXPORT_SYMBOL_GPL(regmap_field_alloc); 1027 1028 /** 1029 * regmap_field_free(): Free register field allocated using regmap_field_alloc 1030 * 1031 * @field: regmap field which should be freed. 1032 */ 1033 void regmap_field_free(struct regmap_field *field) 1034 { 1035 kfree(field); 1036 } 1037 EXPORT_SYMBOL_GPL(regmap_field_free); 1038 1039 /** 1040 * regmap_reinit_cache(): Reinitialise the current register cache 1041 * 1042 * @map: Register map to operate on. 1043 * @config: New configuration. Only the cache data will be used. 1044 * 1045 * Discard any existing register cache for the map and initialize a 1046 * new cache. This can be used to restore the cache to defaults or to 1047 * update the cache configuration to reflect runtime discovery of the 1048 * hardware. 1049 * 1050 * No explicit locking is done here, the user needs to ensure that 1051 * this function will not race with other calls to regmap. 1052 */ 1053 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) 1054 { 1055 regcache_exit(map); 1056 regmap_debugfs_exit(map); 1057 1058 map->max_register = config->max_register; 1059 map->writeable_reg = config->writeable_reg; 1060 map->readable_reg = config->readable_reg; 1061 map->volatile_reg = config->volatile_reg; 1062 map->precious_reg = config->precious_reg; 1063 map->cache_type = config->cache_type; 1064 1065 regmap_debugfs_init(map, config->name); 1066 1067 map->cache_bypass = false; 1068 map->cache_only = false; 1069 1070 return regcache_init(map, config); 1071 } 1072 EXPORT_SYMBOL_GPL(regmap_reinit_cache); 1073 1074 /** 1075 * regmap_exit(): Free a previously allocated register map 1076 */ 1077 void regmap_exit(struct regmap *map) 1078 { 1079 struct regmap_async *async; 1080 1081 regcache_exit(map); 1082 regmap_debugfs_exit(map); 1083 regmap_range_exit(map); 1084 if (map->bus && map->bus->free_context) 1085 map->bus->free_context(map->bus_context); 1086 kfree(map->work_buf); 1087 while (!list_empty(&map->async_free)) { 1088 async = list_first_entry_or_null(&map->async_free, 1089 struct regmap_async, 1090 list); 1091 list_del(&async->list); 1092 kfree(async->work_buf); 1093 kfree(async); 1094 } 1095 kfree(map); 1096 } 1097 EXPORT_SYMBOL_GPL(regmap_exit); 1098 1099 static int dev_get_regmap_match(struct device *dev, void *res, void *data) 1100 { 1101 struct regmap **r = res; 1102 if (!r || !*r) { 1103 WARN_ON(!r || !*r); 1104 return 0; 1105 } 1106 1107 /* If the user didn't specify a name match any */ 1108 if (data) 1109 return (*r)->name == data; 1110 else 1111 return 1; 1112 } 1113 1114 /** 1115 * dev_get_regmap(): Obtain the regmap (if any) for a device 1116 * 1117 * @dev: Device to retrieve the map for 1118 * @name: Optional name for the register map, usually NULL. 1119 * 1120 * Returns the regmap for the device if one is present, or NULL. If 1121 * name is specified then it must match the name specified when 1122 * registering the device, if it is NULL then the first regmap found 1123 * will be used. Devices with multiple register maps are very rare, 1124 * generic code should normally not need to specify a name. 1125 */ 1126 struct regmap *dev_get_regmap(struct device *dev, const char *name) 1127 { 1128 struct regmap **r = devres_find(dev, dev_get_regmap_release, 1129 dev_get_regmap_match, (void *)name); 1130 1131 if (!r) 1132 return NULL; 1133 return *r; 1134 } 1135 EXPORT_SYMBOL_GPL(dev_get_regmap); 1136 1137 /** 1138 * regmap_get_device(): Obtain the device from a regmap 1139 * 1140 * @map: Register map to operate on. 1141 * 1142 * Returns the underlying device that the regmap has been created for. 1143 */ 1144 struct device *regmap_get_device(struct regmap *map) 1145 { 1146 return map->dev; 1147 } 1148 EXPORT_SYMBOL_GPL(regmap_get_device); 1149 1150 static int _regmap_select_page(struct regmap *map, unsigned int *reg, 1151 struct regmap_range_node *range, 1152 unsigned int val_num) 1153 { 1154 void *orig_work_buf; 1155 unsigned int win_offset; 1156 unsigned int win_page; 1157 bool page_chg; 1158 int ret; 1159 1160 win_offset = (*reg - range->range_min) % range->window_len; 1161 win_page = (*reg - range->range_min) / range->window_len; 1162 1163 if (val_num > 1) { 1164 /* Bulk write shouldn't cross range boundary */ 1165 if (*reg + val_num - 1 > range->range_max) 1166 return -EINVAL; 1167 1168 /* ... or single page boundary */ 1169 if (val_num > range->window_len - win_offset) 1170 return -EINVAL; 1171 } 1172 1173 /* It is possible to have selector register inside data window. 1174 In that case, selector register is located on every page and 1175 it needs no page switching, when accessed alone. */ 1176 if (val_num > 1 || 1177 range->window_start + win_offset != range->selector_reg) { 1178 /* Use separate work_buf during page switching */ 1179 orig_work_buf = map->work_buf; 1180 map->work_buf = map->selector_work_buf; 1181 1182 ret = _regmap_update_bits(map, range->selector_reg, 1183 range->selector_mask, 1184 win_page << range->selector_shift, 1185 &page_chg, false); 1186 1187 map->work_buf = orig_work_buf; 1188 1189 if (ret != 0) 1190 return ret; 1191 } 1192 1193 *reg = range->window_start + win_offset; 1194 1195 return 0; 1196 } 1197 1198 int _regmap_raw_write(struct regmap *map, unsigned int reg, 1199 const void *val, size_t val_len) 1200 { 1201 struct regmap_range_node *range; 1202 unsigned long flags; 1203 u8 *u8 = map->work_buf; 1204 void *work_val = map->work_buf + map->format.reg_bytes + 1205 map->format.pad_bytes; 1206 void *buf; 1207 int ret = -ENOTSUPP; 1208 size_t len; 1209 int i; 1210 1211 WARN_ON(!map->bus); 1212 1213 /* Check for unwritable registers before we start */ 1214 if (map->writeable_reg) 1215 for (i = 0; i < val_len / map->format.val_bytes; i++) 1216 if (!map->writeable_reg(map->dev, 1217 reg + (i * map->reg_stride))) 1218 return -EINVAL; 1219 1220 if (!map->cache_bypass && map->format.parse_val) { 1221 unsigned int ival; 1222 int val_bytes = map->format.val_bytes; 1223 for (i = 0; i < val_len / val_bytes; i++) { 1224 ival = map->format.parse_val(val + (i * val_bytes)); 1225 ret = regcache_write(map, reg + (i * map->reg_stride), 1226 ival); 1227 if (ret) { 1228 dev_err(map->dev, 1229 "Error in caching of register: %x ret: %d\n", 1230 reg + i, ret); 1231 return ret; 1232 } 1233 } 1234 if (map->cache_only) { 1235 map->cache_dirty = true; 1236 return 0; 1237 } 1238 } 1239 1240 range = _regmap_range_lookup(map, reg); 1241 if (range) { 1242 int val_num = val_len / map->format.val_bytes; 1243 int win_offset = (reg - range->range_min) % range->window_len; 1244 int win_residue = range->window_len - win_offset; 1245 1246 /* If the write goes beyond the end of the window split it */ 1247 while (val_num > win_residue) { 1248 dev_dbg(map->dev, "Writing window %d/%zu\n", 1249 win_residue, val_len / map->format.val_bytes); 1250 ret = _regmap_raw_write(map, reg, val, win_residue * 1251 map->format.val_bytes); 1252 if (ret != 0) 1253 return ret; 1254 1255 reg += win_residue; 1256 val_num -= win_residue; 1257 val += win_residue * map->format.val_bytes; 1258 val_len -= win_residue * map->format.val_bytes; 1259 1260 win_offset = (reg - range->range_min) % 1261 range->window_len; 1262 win_residue = range->window_len - win_offset; 1263 } 1264 1265 ret = _regmap_select_page(map, ®, range, val_num); 1266 if (ret != 0) 1267 return ret; 1268 } 1269 1270 map->format.format_reg(map->work_buf, reg, map->reg_shift); 1271 1272 u8[0] |= map->write_flag_mask; 1273 1274 /* 1275 * Essentially all I/O mechanisms will be faster with a single 1276 * buffer to write. Since register syncs often generate raw 1277 * writes of single registers optimise that case. 1278 */ 1279 if (val != work_val && val_len == map->format.val_bytes) { 1280 memcpy(work_val, val, map->format.val_bytes); 1281 val = work_val; 1282 } 1283 1284 if (map->async && map->bus->async_write) { 1285 struct regmap_async *async; 1286 1287 trace_regmap_async_write_start(map, reg, val_len); 1288 1289 spin_lock_irqsave(&map->async_lock, flags); 1290 async = list_first_entry_or_null(&map->async_free, 1291 struct regmap_async, 1292 list); 1293 if (async) 1294 list_del(&async->list); 1295 spin_unlock_irqrestore(&map->async_lock, flags); 1296 1297 if (!async) { 1298 async = map->bus->async_alloc(); 1299 if (!async) 1300 return -ENOMEM; 1301 1302 async->work_buf = kzalloc(map->format.buf_size, 1303 GFP_KERNEL | GFP_DMA); 1304 if (!async->work_buf) { 1305 kfree(async); 1306 return -ENOMEM; 1307 } 1308 } 1309 1310 async->map = map; 1311 1312 /* If the caller supplied the value we can use it safely. */ 1313 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + 1314 map->format.reg_bytes + map->format.val_bytes); 1315 1316 spin_lock_irqsave(&map->async_lock, flags); 1317 list_add_tail(&async->list, &map->async_list); 1318 spin_unlock_irqrestore(&map->async_lock, flags); 1319 1320 if (val != work_val) 1321 ret = map->bus->async_write(map->bus_context, 1322 async->work_buf, 1323 map->format.reg_bytes + 1324 map->format.pad_bytes, 1325 val, val_len, async); 1326 else 1327 ret = map->bus->async_write(map->bus_context, 1328 async->work_buf, 1329 map->format.reg_bytes + 1330 map->format.pad_bytes + 1331 val_len, NULL, 0, async); 1332 1333 if (ret != 0) { 1334 dev_err(map->dev, "Failed to schedule write: %d\n", 1335 ret); 1336 1337 spin_lock_irqsave(&map->async_lock, flags); 1338 list_move(&async->list, &map->async_free); 1339 spin_unlock_irqrestore(&map->async_lock, flags); 1340 } 1341 1342 return ret; 1343 } 1344 1345 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); 1346 1347 /* If we're doing a single register write we can probably just 1348 * send the work_buf directly, otherwise try to do a gather 1349 * write. 1350 */ 1351 if (val == work_val) 1352 ret = map->bus->write(map->bus_context, map->work_buf, 1353 map->format.reg_bytes + 1354 map->format.pad_bytes + 1355 val_len); 1356 else if (map->bus->gather_write) 1357 ret = map->bus->gather_write(map->bus_context, map->work_buf, 1358 map->format.reg_bytes + 1359 map->format.pad_bytes, 1360 val, val_len); 1361 1362 /* If that didn't work fall back on linearising by hand. */ 1363 if (ret == -ENOTSUPP) { 1364 len = map->format.reg_bytes + map->format.pad_bytes + val_len; 1365 buf = kzalloc(len, GFP_KERNEL); 1366 if (!buf) 1367 return -ENOMEM; 1368 1369 memcpy(buf, map->work_buf, map->format.reg_bytes); 1370 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, 1371 val, val_len); 1372 ret = map->bus->write(map->bus_context, buf, len); 1373 1374 kfree(buf); 1375 } 1376 1377 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); 1378 1379 return ret; 1380 } 1381 1382 /** 1383 * regmap_can_raw_write - Test if regmap_raw_write() is supported 1384 * 1385 * @map: Map to check. 1386 */ 1387 bool regmap_can_raw_write(struct regmap *map) 1388 { 1389 return map->bus && map->bus->write && map->format.format_val && 1390 map->format.format_reg; 1391 } 1392 EXPORT_SYMBOL_GPL(regmap_can_raw_write); 1393 1394 /** 1395 * regmap_get_raw_read_max - Get the maximum size we can read 1396 * 1397 * @map: Map to check. 1398 */ 1399 size_t regmap_get_raw_read_max(struct regmap *map) 1400 { 1401 return map->max_raw_read; 1402 } 1403 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max); 1404 1405 /** 1406 * regmap_get_raw_write_max - Get the maximum size we can read 1407 * 1408 * @map: Map to check. 1409 */ 1410 size_t regmap_get_raw_write_max(struct regmap *map) 1411 { 1412 return map->max_raw_write; 1413 } 1414 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max); 1415 1416 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 1417 unsigned int val) 1418 { 1419 int ret; 1420 struct regmap_range_node *range; 1421 struct regmap *map = context; 1422 1423 WARN_ON(!map->bus || !map->format.format_write); 1424 1425 range = _regmap_range_lookup(map, reg); 1426 if (range) { 1427 ret = _regmap_select_page(map, ®, range, 1); 1428 if (ret != 0) 1429 return ret; 1430 } 1431 1432 map->format.format_write(map, reg, val); 1433 1434 trace_regmap_hw_write_start(map, reg, 1); 1435 1436 ret = map->bus->write(map->bus_context, map->work_buf, 1437 map->format.buf_size); 1438 1439 trace_regmap_hw_write_done(map, reg, 1); 1440 1441 return ret; 1442 } 1443 1444 static int _regmap_bus_reg_write(void *context, unsigned int reg, 1445 unsigned int val) 1446 { 1447 struct regmap *map = context; 1448 1449 return map->bus->reg_write(map->bus_context, reg, val); 1450 } 1451 1452 static int _regmap_bus_raw_write(void *context, unsigned int reg, 1453 unsigned int val) 1454 { 1455 struct regmap *map = context; 1456 1457 WARN_ON(!map->bus || !map->format.format_val); 1458 1459 map->format.format_val(map->work_buf + map->format.reg_bytes 1460 + map->format.pad_bytes, val, 0); 1461 return _regmap_raw_write(map, reg, 1462 map->work_buf + 1463 map->format.reg_bytes + 1464 map->format.pad_bytes, 1465 map->format.val_bytes); 1466 } 1467 1468 static inline void *_regmap_map_get_context(struct regmap *map) 1469 { 1470 return (map->bus) ? map : map->bus_context; 1471 } 1472 1473 int _regmap_write(struct regmap *map, unsigned int reg, 1474 unsigned int val) 1475 { 1476 int ret; 1477 void *context = _regmap_map_get_context(map); 1478 1479 if (!regmap_writeable(map, reg)) 1480 return -EIO; 1481 1482 if (!map->cache_bypass && !map->defer_caching) { 1483 ret = regcache_write(map, reg, val); 1484 if (ret != 0) 1485 return ret; 1486 if (map->cache_only) { 1487 map->cache_dirty = true; 1488 return 0; 1489 } 1490 } 1491 1492 #ifdef LOG_DEVICE 1493 if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) 1494 dev_info(map->dev, "%x <= %x\n", reg, val); 1495 #endif 1496 1497 trace_regmap_reg_write(map, reg, val); 1498 1499 return map->reg_write(context, reg, val); 1500 } 1501 1502 /** 1503 * regmap_write(): Write a value to a single register 1504 * 1505 * @map: Register map to write to 1506 * @reg: Register to write to 1507 * @val: Value to be written 1508 * 1509 * A value of zero will be returned on success, a negative errno will 1510 * be returned in error cases. 1511 */ 1512 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) 1513 { 1514 int ret; 1515 1516 if (reg % map->reg_stride) 1517 return -EINVAL; 1518 1519 map->lock(map->lock_arg); 1520 1521 ret = _regmap_write(map, reg, val); 1522 1523 map->unlock(map->lock_arg); 1524 1525 return ret; 1526 } 1527 EXPORT_SYMBOL_GPL(regmap_write); 1528 1529 /** 1530 * regmap_write_async(): Write a value to a single register asynchronously 1531 * 1532 * @map: Register map to write to 1533 * @reg: Register to write to 1534 * @val: Value to be written 1535 * 1536 * A value of zero will be returned on success, a negative errno will 1537 * be returned in error cases. 1538 */ 1539 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) 1540 { 1541 int ret; 1542 1543 if (reg % map->reg_stride) 1544 return -EINVAL; 1545 1546 map->lock(map->lock_arg); 1547 1548 map->async = true; 1549 1550 ret = _regmap_write(map, reg, val); 1551 1552 map->async = false; 1553 1554 map->unlock(map->lock_arg); 1555 1556 return ret; 1557 } 1558 EXPORT_SYMBOL_GPL(regmap_write_async); 1559 1560 /** 1561 * regmap_raw_write(): Write raw values to one or more registers 1562 * 1563 * @map: Register map to write to 1564 * @reg: Initial register to write to 1565 * @val: Block of data to be written, laid out for direct transmission to the 1566 * device 1567 * @val_len: Length of data pointed to by val. 1568 * 1569 * This function is intended to be used for things like firmware 1570 * download where a large block of data needs to be transferred to the 1571 * device. No formatting will be done on the data provided. 1572 * 1573 * A value of zero will be returned on success, a negative errno will 1574 * be returned in error cases. 1575 */ 1576 int regmap_raw_write(struct regmap *map, unsigned int reg, 1577 const void *val, size_t val_len) 1578 { 1579 int ret; 1580 1581 if (!regmap_can_raw_write(map)) 1582 return -EINVAL; 1583 if (val_len % map->format.val_bytes) 1584 return -EINVAL; 1585 if (map->max_raw_write && map->max_raw_write > val_len) 1586 return -E2BIG; 1587 1588 map->lock(map->lock_arg); 1589 1590 ret = _regmap_raw_write(map, reg, val, val_len); 1591 1592 map->unlock(map->lock_arg); 1593 1594 return ret; 1595 } 1596 EXPORT_SYMBOL_GPL(regmap_raw_write); 1597 1598 /** 1599 * regmap_field_write(): Write a value to a single register field 1600 * 1601 * @field: Register field to write to 1602 * @val: Value to be written 1603 * 1604 * A value of zero will be returned on success, a negative errno will 1605 * be returned in error cases. 1606 */ 1607 int regmap_field_write(struct regmap_field *field, unsigned int val) 1608 { 1609 return regmap_update_bits(field->regmap, field->reg, 1610 field->mask, val << field->shift); 1611 } 1612 EXPORT_SYMBOL_GPL(regmap_field_write); 1613 1614 /** 1615 * regmap_field_update_bits(): Perform a read/modify/write cycle 1616 * on the register field 1617 * 1618 * @field: Register field to write to 1619 * @mask: Bitmask to change 1620 * @val: Value to be written 1621 * 1622 * A value of zero will be returned on success, a negative errno will 1623 * be returned in error cases. 1624 */ 1625 int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val) 1626 { 1627 mask = (mask << field->shift) & field->mask; 1628 1629 return regmap_update_bits(field->regmap, field->reg, 1630 mask, val << field->shift); 1631 } 1632 EXPORT_SYMBOL_GPL(regmap_field_update_bits); 1633 1634 /** 1635 * regmap_fields_write(): Write a value to a single register field with port ID 1636 * 1637 * @field: Register field to write to 1638 * @id: port ID 1639 * @val: Value to be written 1640 * 1641 * A value of zero will be returned on success, a negative errno will 1642 * be returned in error cases. 1643 */ 1644 int regmap_fields_write(struct regmap_field *field, unsigned int id, 1645 unsigned int val) 1646 { 1647 if (id >= field->id_size) 1648 return -EINVAL; 1649 1650 return regmap_update_bits(field->regmap, 1651 field->reg + (field->id_offset * id), 1652 field->mask, val << field->shift); 1653 } 1654 EXPORT_SYMBOL_GPL(regmap_fields_write); 1655 1656 int regmap_fields_force_write(struct regmap_field *field, unsigned int id, 1657 unsigned int val) 1658 { 1659 if (id >= field->id_size) 1660 return -EINVAL; 1661 1662 return regmap_write_bits(field->regmap, 1663 field->reg + (field->id_offset * id), 1664 field->mask, val << field->shift); 1665 } 1666 EXPORT_SYMBOL_GPL(regmap_fields_force_write); 1667 1668 /** 1669 * regmap_fields_update_bits(): Perform a read/modify/write cycle 1670 * on the register field 1671 * 1672 * @field: Register field to write to 1673 * @id: port ID 1674 * @mask: Bitmask to change 1675 * @val: Value to be written 1676 * 1677 * A value of zero will be returned on success, a negative errno will 1678 * be returned in error cases. 1679 */ 1680 int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, 1681 unsigned int mask, unsigned int val) 1682 { 1683 if (id >= field->id_size) 1684 return -EINVAL; 1685 1686 mask = (mask << field->shift) & field->mask; 1687 1688 return regmap_update_bits(field->regmap, 1689 field->reg + (field->id_offset * id), 1690 mask, val << field->shift); 1691 } 1692 EXPORT_SYMBOL_GPL(regmap_fields_update_bits); 1693 1694 /* 1695 * regmap_bulk_write(): Write multiple registers to the device 1696 * 1697 * @map: Register map to write to 1698 * @reg: First register to be write from 1699 * @val: Block of data to be written, in native register size for device 1700 * @val_count: Number of registers to write 1701 * 1702 * This function is intended to be used for writing a large block of 1703 * data to the device either in single transfer or multiple transfer. 1704 * 1705 * A value of zero will be returned on success, a negative errno will 1706 * be returned in error cases. 1707 */ 1708 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 1709 size_t val_count) 1710 { 1711 int ret = 0, i; 1712 size_t val_bytes = map->format.val_bytes; 1713 size_t total_size = val_bytes * val_count; 1714 1715 if (map->bus && !map->format.parse_inplace) 1716 return -EINVAL; 1717 if (reg % map->reg_stride) 1718 return -EINVAL; 1719 1720 /* 1721 * Some devices don't support bulk write, for 1722 * them we have a series of single write operations in the first two if 1723 * blocks. 1724 * 1725 * The first if block is used for memory mapped io. It does not allow 1726 * val_bytes of 3 for example. 1727 * The second one is used for busses which do not have this limitation 1728 * and can write arbitrary value lengths. 1729 */ 1730 if (!map->bus) { 1731 map->lock(map->lock_arg); 1732 for (i = 0; i < val_count; i++) { 1733 unsigned int ival; 1734 1735 switch (val_bytes) { 1736 case 1: 1737 ival = *(u8 *)(val + (i * val_bytes)); 1738 break; 1739 case 2: 1740 ival = *(u16 *)(val + (i * val_bytes)); 1741 break; 1742 case 4: 1743 ival = *(u32 *)(val + (i * val_bytes)); 1744 break; 1745 #ifdef CONFIG_64BIT 1746 case 8: 1747 ival = *(u64 *)(val + (i * val_bytes)); 1748 break; 1749 #endif 1750 default: 1751 ret = -EINVAL; 1752 goto out; 1753 } 1754 1755 ret = _regmap_write(map, reg + (i * map->reg_stride), 1756 ival); 1757 if (ret != 0) 1758 goto out; 1759 } 1760 out: 1761 map->unlock(map->lock_arg); 1762 } else if (map->use_single_write || 1763 (map->max_raw_write && map->max_raw_write < total_size)) { 1764 int chunk_stride = map->reg_stride; 1765 size_t chunk_size = val_bytes; 1766 size_t chunk_count = val_count; 1767 1768 if (!map->use_single_write) { 1769 chunk_size = map->max_raw_write; 1770 if (chunk_size % val_bytes) 1771 chunk_size -= chunk_size % val_bytes; 1772 chunk_count = total_size / chunk_size; 1773 chunk_stride *= chunk_size / val_bytes; 1774 } 1775 1776 map->lock(map->lock_arg); 1777 /* Write as many bytes as possible with chunk_size */ 1778 for (i = 0; i < chunk_count; i++) { 1779 ret = _regmap_raw_write(map, 1780 reg + (i * chunk_stride), 1781 val + (i * chunk_size), 1782 chunk_size); 1783 if (ret) 1784 break; 1785 } 1786 1787 /* Write remaining bytes */ 1788 if (!ret && chunk_size * i < total_size) { 1789 ret = _regmap_raw_write(map, reg + (i * chunk_stride), 1790 val + (i * chunk_size), 1791 total_size - i * chunk_size); 1792 } 1793 map->unlock(map->lock_arg); 1794 } else { 1795 void *wval; 1796 1797 if (!val_count) 1798 return -EINVAL; 1799 1800 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); 1801 if (!wval) { 1802 dev_err(map->dev, "Error in memory allocation\n"); 1803 return -ENOMEM; 1804 } 1805 for (i = 0; i < val_count * val_bytes; i += val_bytes) 1806 map->format.parse_inplace(wval + i); 1807 1808 map->lock(map->lock_arg); 1809 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 1810 map->unlock(map->lock_arg); 1811 1812 kfree(wval); 1813 } 1814 return ret; 1815 } 1816 EXPORT_SYMBOL_GPL(regmap_bulk_write); 1817 1818 /* 1819 * _regmap_raw_multi_reg_write() 1820 * 1821 * the (register,newvalue) pairs in regs have not been formatted, but 1822 * they are all in the same page and have been changed to being page 1823 * relative. The page register has been written if that was necessary. 1824 */ 1825 static int _regmap_raw_multi_reg_write(struct regmap *map, 1826 const struct reg_sequence *regs, 1827 size_t num_regs) 1828 { 1829 int ret; 1830 void *buf; 1831 int i; 1832 u8 *u8; 1833 size_t val_bytes = map->format.val_bytes; 1834 size_t reg_bytes = map->format.reg_bytes; 1835 size_t pad_bytes = map->format.pad_bytes; 1836 size_t pair_size = reg_bytes + pad_bytes + val_bytes; 1837 size_t len = pair_size * num_regs; 1838 1839 if (!len) 1840 return -EINVAL; 1841 1842 buf = kzalloc(len, GFP_KERNEL); 1843 if (!buf) 1844 return -ENOMEM; 1845 1846 /* We have to linearise by hand. */ 1847 1848 u8 = buf; 1849 1850 for (i = 0; i < num_regs; i++) { 1851 unsigned int reg = regs[i].reg; 1852 unsigned int val = regs[i].def; 1853 trace_regmap_hw_write_start(map, reg, 1); 1854 map->format.format_reg(u8, reg, map->reg_shift); 1855 u8 += reg_bytes + pad_bytes; 1856 map->format.format_val(u8, val, 0); 1857 u8 += val_bytes; 1858 } 1859 u8 = buf; 1860 *u8 |= map->write_flag_mask; 1861 1862 ret = map->bus->write(map->bus_context, buf, len); 1863 1864 kfree(buf); 1865 1866 for (i = 0; i < num_regs; i++) { 1867 int reg = regs[i].reg; 1868 trace_regmap_hw_write_done(map, reg, 1); 1869 } 1870 return ret; 1871 } 1872 1873 static unsigned int _regmap_register_page(struct regmap *map, 1874 unsigned int reg, 1875 struct regmap_range_node *range) 1876 { 1877 unsigned int win_page = (reg - range->range_min) / range->window_len; 1878 1879 return win_page; 1880 } 1881 1882 static int _regmap_range_multi_paged_reg_write(struct regmap *map, 1883 struct reg_sequence *regs, 1884 size_t num_regs) 1885 { 1886 int ret; 1887 int i, n; 1888 struct reg_sequence *base; 1889 unsigned int this_page = 0; 1890 unsigned int page_change = 0; 1891 /* 1892 * the set of registers are not neccessarily in order, but 1893 * since the order of write must be preserved this algorithm 1894 * chops the set each time the page changes. This also applies 1895 * if there is a delay required at any point in the sequence. 1896 */ 1897 base = regs; 1898 for (i = 0, n = 0; i < num_regs; i++, n++) { 1899 unsigned int reg = regs[i].reg; 1900 struct regmap_range_node *range; 1901 1902 range = _regmap_range_lookup(map, reg); 1903 if (range) { 1904 unsigned int win_page = _regmap_register_page(map, reg, 1905 range); 1906 1907 if (i == 0) 1908 this_page = win_page; 1909 if (win_page != this_page) { 1910 this_page = win_page; 1911 page_change = 1; 1912 } 1913 } 1914 1915 /* If we have both a page change and a delay make sure to 1916 * write the regs and apply the delay before we change the 1917 * page. 1918 */ 1919 1920 if (page_change || regs[i].delay_us) { 1921 1922 /* For situations where the first write requires 1923 * a delay we need to make sure we don't call 1924 * raw_multi_reg_write with n=0 1925 * This can't occur with page breaks as we 1926 * never write on the first iteration 1927 */ 1928 if (regs[i].delay_us && i == 0) 1929 n = 1; 1930 1931 ret = _regmap_raw_multi_reg_write(map, base, n); 1932 if (ret != 0) 1933 return ret; 1934 1935 if (regs[i].delay_us) 1936 udelay(regs[i].delay_us); 1937 1938 base += n; 1939 n = 0; 1940 1941 if (page_change) { 1942 ret = _regmap_select_page(map, 1943 &base[n].reg, 1944 range, 1); 1945 if (ret != 0) 1946 return ret; 1947 1948 page_change = 0; 1949 } 1950 1951 } 1952 1953 } 1954 if (n > 0) 1955 return _regmap_raw_multi_reg_write(map, base, n); 1956 return 0; 1957 } 1958 1959 static int _regmap_multi_reg_write(struct regmap *map, 1960 const struct reg_sequence *regs, 1961 size_t num_regs) 1962 { 1963 int i; 1964 int ret; 1965 1966 if (!map->can_multi_write) { 1967 for (i = 0; i < num_regs; i++) { 1968 ret = _regmap_write(map, regs[i].reg, regs[i].def); 1969 if (ret != 0) 1970 return ret; 1971 1972 if (regs[i].delay_us) 1973 udelay(regs[i].delay_us); 1974 } 1975 return 0; 1976 } 1977 1978 if (!map->format.parse_inplace) 1979 return -EINVAL; 1980 1981 if (map->writeable_reg) 1982 for (i = 0; i < num_regs; i++) { 1983 int reg = regs[i].reg; 1984 if (!map->writeable_reg(map->dev, reg)) 1985 return -EINVAL; 1986 if (reg % map->reg_stride) 1987 return -EINVAL; 1988 } 1989 1990 if (!map->cache_bypass) { 1991 for (i = 0; i < num_regs; i++) { 1992 unsigned int val = regs[i].def; 1993 unsigned int reg = regs[i].reg; 1994 ret = regcache_write(map, reg, val); 1995 if (ret) { 1996 dev_err(map->dev, 1997 "Error in caching of register: %x ret: %d\n", 1998 reg, ret); 1999 return ret; 2000 } 2001 } 2002 if (map->cache_only) { 2003 map->cache_dirty = true; 2004 return 0; 2005 } 2006 } 2007 2008 WARN_ON(!map->bus); 2009 2010 for (i = 0; i < num_regs; i++) { 2011 unsigned int reg = regs[i].reg; 2012 struct regmap_range_node *range; 2013 2014 /* Coalesce all the writes between a page break or a delay 2015 * in a sequence 2016 */ 2017 range = _regmap_range_lookup(map, reg); 2018 if (range || regs[i].delay_us) { 2019 size_t len = sizeof(struct reg_sequence)*num_regs; 2020 struct reg_sequence *base = kmemdup(regs, len, 2021 GFP_KERNEL); 2022 if (!base) 2023 return -ENOMEM; 2024 ret = _regmap_range_multi_paged_reg_write(map, base, 2025 num_regs); 2026 kfree(base); 2027 2028 return ret; 2029 } 2030 } 2031 return _regmap_raw_multi_reg_write(map, regs, num_regs); 2032 } 2033 2034 /* 2035 * regmap_multi_reg_write(): Write multiple registers to the device 2036 * 2037 * where the set of register,value pairs are supplied in any order, 2038 * possibly not all in a single range. 2039 * 2040 * @map: Register map to write to 2041 * @regs: Array of structures containing register,value to be written 2042 * @num_regs: Number of registers to write 2043 * 2044 * The 'normal' block write mode will send ultimately send data on the 2045 * target bus as R,V1,V2,V3,..,Vn where successively higer registers are 2046 * addressed. However, this alternative block multi write mode will send 2047 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device 2048 * must of course support the mode. 2049 * 2050 * A value of zero will be returned on success, a negative errno will be 2051 * returned in error cases. 2052 */ 2053 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, 2054 int num_regs) 2055 { 2056 int ret; 2057 2058 map->lock(map->lock_arg); 2059 2060 ret = _regmap_multi_reg_write(map, regs, num_regs); 2061 2062 map->unlock(map->lock_arg); 2063 2064 return ret; 2065 } 2066 EXPORT_SYMBOL_GPL(regmap_multi_reg_write); 2067 2068 /* 2069 * regmap_multi_reg_write_bypassed(): Write multiple registers to the 2070 * device but not the cache 2071 * 2072 * where the set of register are supplied in any order 2073 * 2074 * @map: Register map to write to 2075 * @regs: Array of structures containing register,value to be written 2076 * @num_regs: Number of registers to write 2077 * 2078 * This function is intended to be used for writing a large block of data 2079 * atomically to the device in single transfer for those I2C client devices 2080 * that implement this alternative block write mode. 2081 * 2082 * A value of zero will be returned on success, a negative errno will 2083 * be returned in error cases. 2084 */ 2085 int regmap_multi_reg_write_bypassed(struct regmap *map, 2086 const struct reg_sequence *regs, 2087 int num_regs) 2088 { 2089 int ret; 2090 bool bypass; 2091 2092 map->lock(map->lock_arg); 2093 2094 bypass = map->cache_bypass; 2095 map->cache_bypass = true; 2096 2097 ret = _regmap_multi_reg_write(map, regs, num_regs); 2098 2099 map->cache_bypass = bypass; 2100 2101 map->unlock(map->lock_arg); 2102 2103 return ret; 2104 } 2105 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); 2106 2107 /** 2108 * regmap_raw_write_async(): Write raw values to one or more registers 2109 * asynchronously 2110 * 2111 * @map: Register map to write to 2112 * @reg: Initial register to write to 2113 * @val: Block of data to be written, laid out for direct transmission to the 2114 * device. Must be valid until regmap_async_complete() is called. 2115 * @val_len: Length of data pointed to by val. 2116 * 2117 * This function is intended to be used for things like firmware 2118 * download where a large block of data needs to be transferred to the 2119 * device. No formatting will be done on the data provided. 2120 * 2121 * If supported by the underlying bus the write will be scheduled 2122 * asynchronously, helping maximise I/O speed on higher speed buses 2123 * like SPI. regmap_async_complete() can be called to ensure that all 2124 * asynchrnous writes have been completed. 2125 * 2126 * A value of zero will be returned on success, a negative errno will 2127 * be returned in error cases. 2128 */ 2129 int regmap_raw_write_async(struct regmap *map, unsigned int reg, 2130 const void *val, size_t val_len) 2131 { 2132 int ret; 2133 2134 if (val_len % map->format.val_bytes) 2135 return -EINVAL; 2136 if (reg % map->reg_stride) 2137 return -EINVAL; 2138 2139 map->lock(map->lock_arg); 2140 2141 map->async = true; 2142 2143 ret = _regmap_raw_write(map, reg, val, val_len); 2144 2145 map->async = false; 2146 2147 map->unlock(map->lock_arg); 2148 2149 return ret; 2150 } 2151 EXPORT_SYMBOL_GPL(regmap_raw_write_async); 2152 2153 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2154 unsigned int val_len) 2155 { 2156 struct regmap_range_node *range; 2157 u8 *u8 = map->work_buf; 2158 int ret; 2159 2160 WARN_ON(!map->bus); 2161 2162 range = _regmap_range_lookup(map, reg); 2163 if (range) { 2164 ret = _regmap_select_page(map, ®, range, 2165 val_len / map->format.val_bytes); 2166 if (ret != 0) 2167 return ret; 2168 } 2169 2170 map->format.format_reg(map->work_buf, reg, map->reg_shift); 2171 2172 /* 2173 * Some buses or devices flag reads by setting the high bits in the 2174 * register address; since it's always the high bits for all 2175 * current formats we can do this here rather than in 2176 * formatting. This may break if we get interesting formats. 2177 */ 2178 u8[0] |= map->read_flag_mask; 2179 2180 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); 2181 2182 ret = map->bus->read(map->bus_context, map->work_buf, 2183 map->format.reg_bytes + map->format.pad_bytes, 2184 val, val_len); 2185 2186 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); 2187 2188 return ret; 2189 } 2190 2191 static int _regmap_bus_reg_read(void *context, unsigned int reg, 2192 unsigned int *val) 2193 { 2194 struct regmap *map = context; 2195 2196 return map->bus->reg_read(map->bus_context, reg, val); 2197 } 2198 2199 static int _regmap_bus_read(void *context, unsigned int reg, 2200 unsigned int *val) 2201 { 2202 int ret; 2203 struct regmap *map = context; 2204 2205 if (!map->format.parse_val) 2206 return -EINVAL; 2207 2208 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); 2209 if (ret == 0) 2210 *val = map->format.parse_val(map->work_buf); 2211 2212 return ret; 2213 } 2214 2215 static int _regmap_read(struct regmap *map, unsigned int reg, 2216 unsigned int *val) 2217 { 2218 int ret; 2219 void *context = _regmap_map_get_context(map); 2220 2221 if (!map->cache_bypass) { 2222 ret = regcache_read(map, reg, val); 2223 if (ret == 0) 2224 return 0; 2225 } 2226 2227 if (map->cache_only) 2228 return -EBUSY; 2229 2230 if (!regmap_readable(map, reg)) 2231 return -EIO; 2232 2233 ret = map->reg_read(context, reg, val); 2234 if (ret == 0) { 2235 #ifdef LOG_DEVICE 2236 if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) 2237 dev_info(map->dev, "%x => %x\n", reg, *val); 2238 #endif 2239 2240 trace_regmap_reg_read(map, reg, *val); 2241 2242 if (!map->cache_bypass) 2243 regcache_write(map, reg, *val); 2244 } 2245 2246 return ret; 2247 } 2248 2249 /** 2250 * regmap_read(): Read a value from a single register 2251 * 2252 * @map: Register map to read from 2253 * @reg: Register to be read from 2254 * @val: Pointer to store read value 2255 * 2256 * A value of zero will be returned on success, a negative errno will 2257 * be returned in error cases. 2258 */ 2259 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) 2260 { 2261 int ret; 2262 2263 if (reg % map->reg_stride) 2264 return -EINVAL; 2265 2266 map->lock(map->lock_arg); 2267 2268 ret = _regmap_read(map, reg, val); 2269 2270 map->unlock(map->lock_arg); 2271 2272 return ret; 2273 } 2274 EXPORT_SYMBOL_GPL(regmap_read); 2275 2276 /** 2277 * regmap_raw_read(): Read raw data from the device 2278 * 2279 * @map: Register map to read from 2280 * @reg: First register to be read from 2281 * @val: Pointer to store read value 2282 * @val_len: Size of data to read 2283 * 2284 * A value of zero will be returned on success, a negative errno will 2285 * be returned in error cases. 2286 */ 2287 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2288 size_t val_len) 2289 { 2290 size_t val_bytes = map->format.val_bytes; 2291 size_t val_count = val_len / val_bytes; 2292 unsigned int v; 2293 int ret, i; 2294 2295 if (!map->bus) 2296 return -EINVAL; 2297 if (val_len % map->format.val_bytes) 2298 return -EINVAL; 2299 if (reg % map->reg_stride) 2300 return -EINVAL; 2301 if (val_count == 0) 2302 return -EINVAL; 2303 2304 map->lock(map->lock_arg); 2305 2306 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 2307 map->cache_type == REGCACHE_NONE) { 2308 if (!map->bus->read) { 2309 ret = -ENOTSUPP; 2310 goto out; 2311 } 2312 if (map->max_raw_read && map->max_raw_read < val_len) { 2313 ret = -E2BIG; 2314 goto out; 2315 } 2316 2317 /* Physical block read if there's no cache involved */ 2318 ret = _regmap_raw_read(map, reg, val, val_len); 2319 2320 } else { 2321 /* Otherwise go word by word for the cache; should be low 2322 * cost as we expect to hit the cache. 2323 */ 2324 for (i = 0; i < val_count; i++) { 2325 ret = _regmap_read(map, reg + (i * map->reg_stride), 2326 &v); 2327 if (ret != 0) 2328 goto out; 2329 2330 map->format.format_val(val + (i * val_bytes), v, 0); 2331 } 2332 } 2333 2334 out: 2335 map->unlock(map->lock_arg); 2336 2337 return ret; 2338 } 2339 EXPORT_SYMBOL_GPL(regmap_raw_read); 2340 2341 /** 2342 * regmap_field_read(): Read a value to a single register field 2343 * 2344 * @field: Register field to read from 2345 * @val: Pointer to store read value 2346 * 2347 * A value of zero will be returned on success, a negative errno will 2348 * be returned in error cases. 2349 */ 2350 int regmap_field_read(struct regmap_field *field, unsigned int *val) 2351 { 2352 int ret; 2353 unsigned int reg_val; 2354 ret = regmap_read(field->regmap, field->reg, ®_val); 2355 if (ret != 0) 2356 return ret; 2357 2358 reg_val &= field->mask; 2359 reg_val >>= field->shift; 2360 *val = reg_val; 2361 2362 return ret; 2363 } 2364 EXPORT_SYMBOL_GPL(regmap_field_read); 2365 2366 /** 2367 * regmap_fields_read(): Read a value to a single register field with port ID 2368 * 2369 * @field: Register field to read from 2370 * @id: port ID 2371 * @val: Pointer to store read value 2372 * 2373 * A value of zero will be returned on success, a negative errno will 2374 * be returned in error cases. 2375 */ 2376 int regmap_fields_read(struct regmap_field *field, unsigned int id, 2377 unsigned int *val) 2378 { 2379 int ret; 2380 unsigned int reg_val; 2381 2382 if (id >= field->id_size) 2383 return -EINVAL; 2384 2385 ret = regmap_read(field->regmap, 2386 field->reg + (field->id_offset * id), 2387 ®_val); 2388 if (ret != 0) 2389 return ret; 2390 2391 reg_val &= field->mask; 2392 reg_val >>= field->shift; 2393 *val = reg_val; 2394 2395 return ret; 2396 } 2397 EXPORT_SYMBOL_GPL(regmap_fields_read); 2398 2399 /** 2400 * regmap_bulk_read(): Read multiple registers from the device 2401 * 2402 * @map: Register map to read from 2403 * @reg: First register to be read from 2404 * @val: Pointer to store read value, in native register size for device 2405 * @val_count: Number of registers to read 2406 * 2407 * A value of zero will be returned on success, a negative errno will 2408 * be returned in error cases. 2409 */ 2410 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 2411 size_t val_count) 2412 { 2413 int ret, i; 2414 size_t val_bytes = map->format.val_bytes; 2415 bool vol = regmap_volatile_range(map, reg, val_count); 2416 2417 if (reg % map->reg_stride) 2418 return -EINVAL; 2419 2420 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { 2421 /* 2422 * Some devices does not support bulk read, for 2423 * them we have a series of single read operations. 2424 */ 2425 size_t total_size = val_bytes * val_count; 2426 2427 if (!map->use_single_read && 2428 (!map->max_raw_read || map->max_raw_read > total_size)) { 2429 ret = regmap_raw_read(map, reg, val, 2430 val_bytes * val_count); 2431 if (ret != 0) 2432 return ret; 2433 } else { 2434 /* 2435 * Some devices do not support bulk read or do not 2436 * support large bulk reads, for them we have a series 2437 * of read operations. 2438 */ 2439 int chunk_stride = map->reg_stride; 2440 size_t chunk_size = val_bytes; 2441 size_t chunk_count = val_count; 2442 2443 if (!map->use_single_read) { 2444 chunk_size = map->max_raw_read; 2445 if (chunk_size % val_bytes) 2446 chunk_size -= chunk_size % val_bytes; 2447 chunk_count = total_size / chunk_size; 2448 chunk_stride *= chunk_size / val_bytes; 2449 } 2450 2451 /* Read bytes that fit into a multiple of chunk_size */ 2452 for (i = 0; i < chunk_count; i++) { 2453 ret = regmap_raw_read(map, 2454 reg + (i * chunk_stride), 2455 val + (i * chunk_size), 2456 chunk_size); 2457 if (ret != 0) 2458 return ret; 2459 } 2460 2461 /* Read remaining bytes */ 2462 if (chunk_size * i < total_size) { 2463 ret = regmap_raw_read(map, 2464 reg + (i * chunk_stride), 2465 val + (i * chunk_size), 2466 total_size - i * chunk_size); 2467 if (ret != 0) 2468 return ret; 2469 } 2470 } 2471 2472 for (i = 0; i < val_count * val_bytes; i += val_bytes) 2473 map->format.parse_inplace(val + i); 2474 } else { 2475 for (i = 0; i < val_count; i++) { 2476 unsigned int ival; 2477 ret = regmap_read(map, reg + (i * map->reg_stride), 2478 &ival); 2479 if (ret != 0) 2480 return ret; 2481 2482 if (map->format.format_val) { 2483 map->format.format_val(val + (i * val_bytes), ival, 0); 2484 } else { 2485 /* Devices providing read and write 2486 * operations can use the bulk I/O 2487 * functions if they define a val_bytes, 2488 * we assume that the values are native 2489 * endian. 2490 */ 2491 u32 *u32 = val; 2492 u16 *u16 = val; 2493 u8 *u8 = val; 2494 2495 switch (map->format.val_bytes) { 2496 case 4: 2497 u32[i] = ival; 2498 break; 2499 case 2: 2500 u16[i] = ival; 2501 break; 2502 case 1: 2503 u8[i] = ival; 2504 break; 2505 default: 2506 return -EINVAL; 2507 } 2508 } 2509 } 2510 } 2511 2512 return 0; 2513 } 2514 EXPORT_SYMBOL_GPL(regmap_bulk_read); 2515 2516 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 2517 unsigned int mask, unsigned int val, 2518 bool *change, bool force_write) 2519 { 2520 int ret; 2521 unsigned int tmp, orig; 2522 2523 if (change) 2524 *change = false; 2525 2526 if (regmap_volatile(map, reg) && map->reg_update_bits) { 2527 ret = map->reg_update_bits(map->bus_context, reg, mask, val); 2528 if (ret == 0 && change) 2529 *change = true; 2530 } else { 2531 ret = _regmap_read(map, reg, &orig); 2532 if (ret != 0) 2533 return ret; 2534 2535 tmp = orig & ~mask; 2536 tmp |= val & mask; 2537 2538 if (force_write || (tmp != orig)) { 2539 ret = _regmap_write(map, reg, tmp); 2540 if (ret == 0 && change) 2541 *change = true; 2542 } 2543 } 2544 2545 return ret; 2546 } 2547 2548 /** 2549 * regmap_update_bits: Perform a read/modify/write cycle on the register map 2550 * 2551 * @map: Register map to update 2552 * @reg: Register to update 2553 * @mask: Bitmask to change 2554 * @val: New value for bitmask 2555 * 2556 * Returns zero for success, a negative number on error. 2557 */ 2558 int regmap_update_bits(struct regmap *map, unsigned int reg, 2559 unsigned int mask, unsigned int val) 2560 { 2561 int ret; 2562 2563 map->lock(map->lock_arg); 2564 ret = _regmap_update_bits(map, reg, mask, val, NULL, false); 2565 map->unlock(map->lock_arg); 2566 2567 return ret; 2568 } 2569 EXPORT_SYMBOL_GPL(regmap_update_bits); 2570 2571 /** 2572 * regmap_write_bits: Perform a read/modify/write cycle on the register map 2573 * 2574 * @map: Register map to update 2575 * @reg: Register to update 2576 * @mask: Bitmask to change 2577 * @val: New value for bitmask 2578 * 2579 * Returns zero for success, a negative number on error. 2580 */ 2581 int regmap_write_bits(struct regmap *map, unsigned int reg, 2582 unsigned int mask, unsigned int val) 2583 { 2584 int ret; 2585 2586 map->lock(map->lock_arg); 2587 ret = _regmap_update_bits(map, reg, mask, val, NULL, true); 2588 map->unlock(map->lock_arg); 2589 2590 return ret; 2591 } 2592 EXPORT_SYMBOL_GPL(regmap_write_bits); 2593 2594 /** 2595 * regmap_update_bits_async: Perform a read/modify/write cycle on the register 2596 * map asynchronously 2597 * 2598 * @map: Register map to update 2599 * @reg: Register to update 2600 * @mask: Bitmask to change 2601 * @val: New value for bitmask 2602 * 2603 * With most buses the read must be done synchronously so this is most 2604 * useful for devices with a cache which do not need to interact with 2605 * the hardware to determine the current register value. 2606 * 2607 * Returns zero for success, a negative number on error. 2608 */ 2609 int regmap_update_bits_async(struct regmap *map, unsigned int reg, 2610 unsigned int mask, unsigned int val) 2611 { 2612 int ret; 2613 2614 map->lock(map->lock_arg); 2615 2616 map->async = true; 2617 2618 ret = _regmap_update_bits(map, reg, mask, val, NULL, false); 2619 2620 map->async = false; 2621 2622 map->unlock(map->lock_arg); 2623 2624 return ret; 2625 } 2626 EXPORT_SYMBOL_GPL(regmap_update_bits_async); 2627 2628 /** 2629 * regmap_update_bits_check: Perform a read/modify/write cycle on the 2630 * register map and report if updated 2631 * 2632 * @map: Register map to update 2633 * @reg: Register to update 2634 * @mask: Bitmask to change 2635 * @val: New value for bitmask 2636 * @change: Boolean indicating if a write was done 2637 * 2638 * Returns zero for success, a negative number on error. 2639 */ 2640 int regmap_update_bits_check(struct regmap *map, unsigned int reg, 2641 unsigned int mask, unsigned int val, 2642 bool *change) 2643 { 2644 int ret; 2645 2646 map->lock(map->lock_arg); 2647 ret = _regmap_update_bits(map, reg, mask, val, change, false); 2648 map->unlock(map->lock_arg); 2649 return ret; 2650 } 2651 EXPORT_SYMBOL_GPL(regmap_update_bits_check); 2652 2653 /** 2654 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the 2655 * register map asynchronously and report if 2656 * updated 2657 * 2658 * @map: Register map to update 2659 * @reg: Register to update 2660 * @mask: Bitmask to change 2661 * @val: New value for bitmask 2662 * @change: Boolean indicating if a write was done 2663 * 2664 * With most buses the read must be done synchronously so this is most 2665 * useful for devices with a cache which do not need to interact with 2666 * the hardware to determine the current register value. 2667 * 2668 * Returns zero for success, a negative number on error. 2669 */ 2670 int regmap_update_bits_check_async(struct regmap *map, unsigned int reg, 2671 unsigned int mask, unsigned int val, 2672 bool *change) 2673 { 2674 int ret; 2675 2676 map->lock(map->lock_arg); 2677 2678 map->async = true; 2679 2680 ret = _regmap_update_bits(map, reg, mask, val, change, false); 2681 2682 map->async = false; 2683 2684 map->unlock(map->lock_arg); 2685 2686 return ret; 2687 } 2688 EXPORT_SYMBOL_GPL(regmap_update_bits_check_async); 2689 2690 void regmap_async_complete_cb(struct regmap_async *async, int ret) 2691 { 2692 struct regmap *map = async->map; 2693 bool wake; 2694 2695 trace_regmap_async_io_complete(map); 2696 2697 spin_lock(&map->async_lock); 2698 list_move(&async->list, &map->async_free); 2699 wake = list_empty(&map->async_list); 2700 2701 if (ret != 0) 2702 map->async_ret = ret; 2703 2704 spin_unlock(&map->async_lock); 2705 2706 if (wake) 2707 wake_up(&map->async_waitq); 2708 } 2709 EXPORT_SYMBOL_GPL(regmap_async_complete_cb); 2710 2711 static int regmap_async_is_done(struct regmap *map) 2712 { 2713 unsigned long flags; 2714 int ret; 2715 2716 spin_lock_irqsave(&map->async_lock, flags); 2717 ret = list_empty(&map->async_list); 2718 spin_unlock_irqrestore(&map->async_lock, flags); 2719 2720 return ret; 2721 } 2722 2723 /** 2724 * regmap_async_complete: Ensure all asynchronous I/O has completed. 2725 * 2726 * @map: Map to operate on. 2727 * 2728 * Blocks until any pending asynchronous I/O has completed. Returns 2729 * an error code for any failed I/O operations. 2730 */ 2731 int regmap_async_complete(struct regmap *map) 2732 { 2733 unsigned long flags; 2734 int ret; 2735 2736 /* Nothing to do with no async support */ 2737 if (!map->bus || !map->bus->async_write) 2738 return 0; 2739 2740 trace_regmap_async_complete_start(map); 2741 2742 wait_event(map->async_waitq, regmap_async_is_done(map)); 2743 2744 spin_lock_irqsave(&map->async_lock, flags); 2745 ret = map->async_ret; 2746 map->async_ret = 0; 2747 spin_unlock_irqrestore(&map->async_lock, flags); 2748 2749 trace_regmap_async_complete_done(map); 2750 2751 return ret; 2752 } 2753 EXPORT_SYMBOL_GPL(regmap_async_complete); 2754 2755 /** 2756 * regmap_register_patch: Register and apply register updates to be applied 2757 * on device initialistion 2758 * 2759 * @map: Register map to apply updates to. 2760 * @regs: Values to update. 2761 * @num_regs: Number of entries in regs. 2762 * 2763 * Register a set of register updates to be applied to the device 2764 * whenever the device registers are synchronised with the cache and 2765 * apply them immediately. Typically this is used to apply 2766 * corrections to be applied to the device defaults on startup, such 2767 * as the updates some vendors provide to undocumented registers. 2768 * 2769 * The caller must ensure that this function cannot be called 2770 * concurrently with either itself or regcache_sync(). 2771 */ 2772 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, 2773 int num_regs) 2774 { 2775 struct reg_sequence *p; 2776 int ret; 2777 bool bypass; 2778 2779 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", 2780 num_regs)) 2781 return 0; 2782 2783 p = krealloc(map->patch, 2784 sizeof(struct reg_sequence) * (map->patch_regs + num_regs), 2785 GFP_KERNEL); 2786 if (p) { 2787 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); 2788 map->patch = p; 2789 map->patch_regs += num_regs; 2790 } else { 2791 return -ENOMEM; 2792 } 2793 2794 map->lock(map->lock_arg); 2795 2796 bypass = map->cache_bypass; 2797 2798 map->cache_bypass = true; 2799 map->async = true; 2800 2801 ret = _regmap_multi_reg_write(map, regs, num_regs); 2802 2803 map->async = false; 2804 map->cache_bypass = bypass; 2805 2806 map->unlock(map->lock_arg); 2807 2808 regmap_async_complete(map); 2809 2810 return ret; 2811 } 2812 EXPORT_SYMBOL_GPL(regmap_register_patch); 2813 2814 /* 2815 * regmap_get_val_bytes(): Report the size of a register value 2816 * 2817 * Report the size of a register value, mainly intended to for use by 2818 * generic infrastructure built on top of regmap. 2819 */ 2820 int regmap_get_val_bytes(struct regmap *map) 2821 { 2822 if (map->format.format_write) 2823 return -EINVAL; 2824 2825 return map->format.val_bytes; 2826 } 2827 EXPORT_SYMBOL_GPL(regmap_get_val_bytes); 2828 2829 /** 2830 * regmap_get_max_register(): Report the max register value 2831 * 2832 * Report the max register value, mainly intended to for use by 2833 * generic infrastructure built on top of regmap. 2834 */ 2835 int regmap_get_max_register(struct regmap *map) 2836 { 2837 return map->max_register ? map->max_register : -EINVAL; 2838 } 2839 EXPORT_SYMBOL_GPL(regmap_get_max_register); 2840 2841 /** 2842 * regmap_get_reg_stride(): Report the register address stride 2843 * 2844 * Report the register address stride, mainly intended to for use by 2845 * generic infrastructure built on top of regmap. 2846 */ 2847 int regmap_get_reg_stride(struct regmap *map) 2848 { 2849 return map->reg_stride; 2850 } 2851 EXPORT_SYMBOL_GPL(regmap_get_reg_stride); 2852 2853 int regmap_parse_val(struct regmap *map, const void *buf, 2854 unsigned int *val) 2855 { 2856 if (!map->format.parse_val) 2857 return -EINVAL; 2858 2859 *val = map->format.parse_val(buf); 2860 2861 return 0; 2862 } 2863 EXPORT_SYMBOL_GPL(regmap_parse_val); 2864 2865 static int __init regmap_initcall(void) 2866 { 2867 regmap_debugfs_initcall(); 2868 2869 return 0; 2870 } 2871 postcore_initcall(regmap_initcall); 2872