/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
#include <linux/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
                                 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val);

/* Return true if @reg lies within any of the @nranges entries of @ranges. */
bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
                          unsigned int nranges)
{
    const struct regmap_range *r;
    int i;

    for (i = 0, r = ranges; i < nranges; i++, r++)
        if (regmap_reg_in_range(reg, r))
            return true;
    return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

/*
 * Check @reg against an access table: a register in any "no" range is
 * always rejected; if no "yes" ranges are given every other register
 * is accepted, otherwise @reg must fall inside a "yes" range.
 */
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
                              const struct regmap_access_table *table)
{
    /* Check "no ranges" first */
    if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
        return false;

    /* In case zero "yes ranges" are supplied, any reg is OK */
    if (!table->n_yes_ranges)
        return true;

    return regmap_reg_in_ranges(reg, table->yes_ranges,
                                table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

/*
 * Writability check: per-register callback takes precedence over the
 * write access table; with neither configured everything in range is
 * writable.
 */
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
    if (map->max_register && reg > map->max_register)
        return false;

    if (map->writeable_reg)
        return map->writeable_reg(map->dev, reg);

    if (map->wr_table)
        return regmap_check_range_table(map, reg, map->wr_table);

    return true;
}

/*
 * Readability check.  Devices using a combined format_write path are
 * write-only, so nothing is readable on them.
 */
bool regmap_readable(struct regmap *map, unsigned int reg)
{
    if (map->max_register && reg > map->max_register)
        return false;

    if (map->format.format_write)
        return false;

    if (map->readable_reg)
        return map->readable_reg(map->dev, reg);

    if (map->rd_table)
        return regmap_check_range_table(map, reg, map->rd_table);

    return true;
}

/*
 * Volatility check: unreadable registers are never volatile; with no
 * callback/table configured, registers are treated as volatile only
 * when there is no cache (cache_ops unset).
 */
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
    if (!regmap_readable(map, reg))
        return false;

    if (map->volatile_reg)
        return map->volatile_reg(map->dev, reg);

    if (map->volatile_table)
        return regmap_check_range_table(map, reg, map->volatile_table);

    if (map->cache_ops)
        return false;
    else
        return true;
}

/*
 * Precious check (registers whose reads have side effects, e.g. clear
 * on read): defaults to false when no callback or table is configured.
 */
bool regmap_precious(struct regmap *map, unsigned int reg)
{
    if (!regmap_readable(map, reg))
        return false;

    if (map->precious_reg)
        return map->precious_reg(map->dev, reg);

    if (map->precious_table)
        return regmap_check_range_table(map, reg, map->precious_table);

    return false;
}

/* True only when every register in [reg, reg + num) is volatile. */
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
                                  size_t num)
{
    unsigned int i;

    for (i = 0; i < num; i++)
        if (!regmap_volatile(map, reg + i))
            return false;

    return true;
}

/*
 * Combined register+value formatters for devices where register and
 * value share bytes (e.g. 2-bit register / 6-bit value packed into one
 * byte).  All write into map->work_buf.
 */
static void regmap_format_2_6_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
    u8 *out = map->work_buf;

    *out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
                                     unsigned int reg, unsigned int val)
{
    __be16 *out = map->work_buf;
    *out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
    __be16 *out = map->work_buf;
    *out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
                                      unsigned int reg, unsigned int val)
{
    u8 *out = map->work_buf;

    out[2] = val;
    out[1] = (val >> 8) | (reg << 6);
    out[0] = reg >> 2;
}

/*
 * Value formatters: serialise @val (pre-shifted by @shift) into @buf at
 * the configured width and endianness.
 */
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
    u8 *b = buf;

    b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
    __be16 *b = buf;

    b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
    __le16 *b = buf;

    b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
    *(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
    u8 *b = buf;

    val <<= shift;

    /* 24-bit values are always big-endian on the wire */
    b[0] = val >> 16;
    b[1] = val >> 8;
    b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
    __be32 *b = buf;

    b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
    __le32 *b = buf;

    b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
    *(u32 *)buf = val << shift;
}

/* In-place parse is a no-op for native-endian and 8-bit values. */
static void regmap_parse_inplace_noop(void *buf)
{
}

/*
 * Value parsers: deserialise a raw buffer back into a CPU-order
 * unsigned int, or byteswap in place for bulk reads.
 */
static unsigned int regmap_parse_8(const void *buf)
{
    const u8 *b = buf;

    return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
    const __be16 *b = buf;

    return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
    const __le16 *b = buf;

    return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
    __be16 *b = buf;

    b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
    __le16 *b = buf;

    b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
    return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
    const u8 *b = buf;
    unsigned int ret = b[2];
    ret |= ((unsigned int)b[1]) << 8;
    ret |= ((unsigned int)b[0]) << 16;

    return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
    const __be32 *b = buf;

    return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
    const __le32 *b = buf;

    return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
    __be32 *b = buf;

    b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
    __le32 *b = buf;

    b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
    return *(u32 *)buf;
}

/* Default lock callbacks: mutex for sleeping buses... */
static void regmap_lock_mutex(void *__map)
{
    struct regmap *map = __map;
    mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
    struct regmap *map = __map;
    mutex_unlock(&map->mutex);
}

/* ...spinlock for fast_io buses; IRQ flags stashed in the map. */
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
    struct regmap *map = __map;
    unsigned long flags;

    spin_lock_irqsave(&map->spinlock, flags);
    map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
    struct regmap *map = __map;
    spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
    /*
     * We don't actually have anything to do here; the goal here
     * is not to manage the regmap but to provide a simple way to
     * get the regmap back given a struct device.
     */
}

/*
 * Insert a virtual register range into the map's rb-tree, keyed on
 * [range_min, range_max].  Returns false if the new range overlaps an
 * existing one.
 */
static bool _regmap_range_add(struct regmap *map,
                              struct regmap_range_node *data)
{
    struct rb_root *root = &map->range_tree;
    struct rb_node **new = &(root->rb_node), *parent = NULL;

    while (*new) {
        struct regmap_range_node *this =
            container_of(*new, struct regmap_range_node, node);

        parent = *new;
        if (data->range_max < this->range_min)
            new = &((*new)->rb_left);
        else if (data->range_min > this->range_max)
            new = &((*new)->rb_right);
        else
            return false;
    }

    rb_link_node(&data->node, parent, new);
    rb_insert_color(&data->node, root);

    return true;
}

/* Find the virtual range containing @reg, or NULL if it is unranged. */
static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
                                                      unsigned int reg)
{
    struct rb_node *node = map->range_tree.rb_node;

    while (node) {
        struct regmap_range_node *this =
            container_of(node, struct regmap_range_node, node);

        if (reg < this->range_min)
            node = node->rb_left;
        else if (reg > this->range_max)
            node = node->rb_right;
        else
            return this;
    }

    return NULL;
}

/* Free all range nodes and the selector scratch buffer. */
static void regmap_range_exit(struct regmap *map)
{
    struct rb_node *next;
    struct regmap_range_node *range_node;

    next = rb_first(&map->range_tree);
    while (next) {
        range_node = rb_entry(next, struct regmap_range_node, node);
        next = rb_next(&range_node->node);
        rb_erase(&range_node->node, &map->range_tree);
        kfree(range_node);
    }

    kfree(map->selector_work_buf);
}

/*
 * Bind @map to @dev: set up debugfs and register a devres entry so
 * dev_get_regmap() can find the map later.  Rolls back debugfs on
 * allocation failure.
 */
int regmap_attach_dev(struct device *dev, struct regmap *map,
                      const struct regmap_config *config)
{
    struct regmap **m;

    map->dev = dev;

    regmap_debugfs_init(map, config->name);

    /* Add a devres resource for dev_get_regmap() */
    m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
    if (!m) {
        regmap_debugfs_exit(map);
        return -ENOMEM;
    }
    *m = map;
    devres_add(dev, m);

    return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

/**
 * regmap_init(): Initialise register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer to
 * a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.
 */
struct regmap *regmap_init(struct device *dev,
                           const struct regmap_bus *bus,
                           void *bus_context,
                           const struct regmap_config *config)
{
    struct regmap *map;
    int ret = -EINVAL;
    enum regmap_endian reg_endian, val_endian;
    int i, j;

    if (!config)
        goto err;

    map = kzalloc(sizeof(*map), GFP_KERNEL);
    if (map == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* Locking: caller-supplied callbacks win, else spinlock for
     * fast_io buses, mutex otherwise. */
    if (config->lock && config->unlock) {
        map->lock = config->lock;
        map->unlock = config->unlock;
        map->lock_arg = config->lock_arg;
    } else {
        if ((bus && bus->fast_io) ||
            config->fast_io) {
            spin_lock_init(&map->spinlock);
            map->lock = regmap_lock_spinlock;
            map->unlock = regmap_unlock_spinlock;
        } else {
            mutex_init(&map->mutex);
            map->lock = regmap_lock_mutex;
            map->unlock = regmap_unlock_mutex;
        }
        map->lock_arg = map;
    }

    /* Copy the configuration into the map */
    map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
    map->format.pad_bytes = config->pad_bits / 8;
    map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
    map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
            config->val_bits + config->pad_bits, 8);
    map->reg_shift = config->pad_bits % 8;
    if (config->reg_stride)
        map->reg_stride = config->reg_stride;
    else
        map->reg_stride = 1;
    map->use_single_rw = config->use_single_rw;
    map->can_multi_write = config->can_multi_write;
    map->dev = dev;
    map->bus = bus;
    map->bus_context = bus_context;
    map->max_register = config->max_register;
    map->wr_table = config->wr_table;
    map->rd_table = config->rd_table;
    map->volatile_table = config->volatile_table;
    map->precious_table = config->precious_table;
    map->writeable_reg = config->writeable_reg;
    map->readable_reg = config->readable_reg;
    map->volatile_reg = config->volatile_reg;
    map->precious_reg = config->precious_reg;
    map->cache_type = config->cache_type;
    map->name = config->name;

    spin_lock_init(&map->async_lock);
    INIT_LIST_HEAD(&map->async_list);
    INIT_LIST_HEAD(&map->async_free);
    init_waitqueue_head(&map->async_waitq);

    if (config->read_flag_mask || config->write_flag_mask) {
        map->read_flag_mask = config->read_flag_mask;
        map->write_flag_mask = config->write_flag_mask;
    } else if (bus) {
        map->read_flag_mask = bus->read_flag_mask;
    }

    /* Choose the low-level I/O operations: no bus at all means the
     * config supplies reg_read/reg_write directly; a bus without raw
     * read/write uses its reg-level ops; otherwise the formatted raw
     * path below is configured. */
    if (!bus) {
        map->reg_read = config->reg_read;
        map->reg_write = config->reg_write;

        map->defer_caching = false;
        goto skip_format_initialization;
    } else if (!bus->read || !bus->write) {
        map->reg_read = _regmap_bus_reg_read;
        map->reg_write = _regmap_bus_reg_write;

        map->defer_caching = false;
        goto skip_format_initialization;
    } else {
        map->reg_read = _regmap_bus_read;
    }

    /* Resolve endianness: config overrides bus default, which
     * overrides the global default of big-endian. */
    reg_endian = config->reg_format_endian;
    if (reg_endian == REGMAP_ENDIAN_DEFAULT)
        reg_endian = bus->reg_format_endian_default;
    if (reg_endian == REGMAP_ENDIAN_DEFAULT)
        reg_endian = REGMAP_ENDIAN_BIG;

    val_endian = config->val_format_endian;
    if (val_endian == REGMAP_ENDIAN_DEFAULT)
        val_endian = bus->val_format_endian_default;
    if (val_endian == REGMAP_ENDIAN_DEFAULT)
        val_endian = REGMAP_ENDIAN_BIG;

    /* Select register formatter; the small odd widths use combined
     * register+value formatters (format_write). */
    switch (config->reg_bits + map->reg_shift) {
    case 2:
        switch (config->val_bits) {
        case 6:
            map->format.format_write = regmap_format_2_6_write;
            break;
        default:
            goto err_map;
        }
        break;

    case 4:
        switch (config->val_bits) {
        case 12:
            map->format.format_write = regmap_format_4_12_write;
            break;
        default:
            goto err_map;
        }
        break;

    case 7:
        switch (config->val_bits) {
        case 9:
            map->format.format_write = regmap_format_7_9_write;
            break;
        default:
            goto err_map;
        }
        break;

    case 10:
        switch (config->val_bits) {
        case 14:
            map->format.format_write = regmap_format_10_14_write;
            break;
        default:
            goto err_map;
        }
        break;

    case 8:
        map->format.format_reg = regmap_format_8;
        break;

    case 16:
        switch (reg_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_reg = regmap_format_16_be;
            break;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_reg = regmap_format_16_native;
            break;
        default:
            goto err_map;
        }
        break;

    case 24:
        if (reg_endian != REGMAP_ENDIAN_BIG)
            goto err_map;
        map->format.format_reg = regmap_format_24;
        break;

    case 32:
        switch (reg_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_reg = regmap_format_32_be;
            break;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_reg = regmap_format_32_native;
            break;
        default:
            goto err_map;
        }
        break;

    default:
        goto err_map;
    }

    if (val_endian == REGMAP_ENDIAN_NATIVE)
        map->format.parse_inplace = regmap_parse_inplace_noop;

    /* Select value formatter/parser for the configured width. */
    switch (config->val_bits) {
    case 8:
        map->format.format_val = regmap_format_8;
        map->format.parse_val = regmap_parse_8;
        map->format.parse_inplace = regmap_parse_inplace_noop;
        break;
    case 16:
        switch (val_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_val = regmap_format_16_be;
            map->format.parse_val = regmap_parse_16_be;
            map->format.parse_inplace = regmap_parse_16_be_inplace;
            break;
        case REGMAP_ENDIAN_LITTLE:
            map->format.format_val = regmap_format_16_le;
            map->format.parse_val = regmap_parse_16_le;
            map->format.parse_inplace = regmap_parse_16_le_inplace;
            break;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_val = regmap_format_16_native;
            map->format.parse_val = regmap_parse_16_native;
            break;
        default:
            goto err_map;
        }
        break;
    case 24:
        if (val_endian != REGMAP_ENDIAN_BIG)
            goto err_map;
        map->format.format_val = regmap_format_24;
        map->format.parse_val = regmap_parse_24;
        break;
    case 32:
        switch (val_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_val = regmap_format_32_be;
            map->format.parse_val = regmap_parse_32_be;
            map->format.parse_inplace = regmap_parse_32_be_inplace;
            break;
        case REGMAP_ENDIAN_LITTLE:
            map->format.format_val = regmap_format_32_le;
            map->format.parse_val = regmap_parse_32_le;
            map->format.parse_inplace = regmap_parse_32_le_inplace;
            break;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_val = regmap_format_32_native;
            map->format.parse_val = regmap_parse_32_native;
            break;
        default:
            goto err_map;
        }
        break;
    }

    /* Combined-format devices are big-endian only and cannot gather. */
    if (map->format.format_write) {
        if ((reg_endian != REGMAP_ENDIAN_BIG) ||
            (val_endian != REGMAP_ENDIAN_BIG))
            goto err_map;
        map->use_single_rw = true;
    }

    if (!map->format.format_write &&
        !(map->format.format_reg && map->format.format_val))
        goto err_map;

    map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
    if (map->work_buf == NULL) {
        ret = -ENOMEM;
        goto err_map;
    }

    /* defer_caching: on the raw path the cache write happens inside
     * the raw write itself rather than up-front. */
    if (map->format.format_write) {
        map->defer_caching = false;
        map->reg_write = _regmap_bus_formatted_write;
    } else if (map->format.format_val) {
        map->defer_caching = true;
        map->reg_write = _regmap_bus_raw_write;
    }

skip_format_initialization:

    /* Validate and register the virtual (paged) register ranges. */
    map->range_tree = RB_ROOT;
    for (i = 0; i < config->num_ranges; i++) {
        const struct regmap_range_cfg *range_cfg = &config->ranges[i];
        struct regmap_range_node *new;

        /* Sanity check */
        if (range_cfg->range_max < range_cfg->range_min) {
            dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
                    range_cfg->range_max, range_cfg->range_min);
            goto err_range;
        }

        if (range_cfg->range_max > map->max_register) {
            dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
                    range_cfg->range_max, map->max_register);
            goto err_range;
        }

        if (range_cfg->selector_reg > map->max_register) {
            dev_err(map->dev,
                    "Invalid range %d: selector out of map\n", i);
            goto err_range;
        }

        if (range_cfg->window_len == 0) {
            dev_err(map->dev, "Invalid range %d: window_len 0\n",
                    i);
            goto err_range;
        }

        /* Make sure, that this register range has no selector
           or data window within its boundary */
        for (j = 0; j < config->num_ranges; j++) {
            unsigned sel_reg = config->ranges[j].selector_reg;
            unsigned win_min = config->ranges[j].window_start;
            unsigned win_max = win_min +
                config->ranges[j].window_len - 1;

            /* Allow data window inside its own virtual range */
            if (j == i)
                continue;

            if (range_cfg->range_min <= sel_reg &&
                sel_reg <= range_cfg->range_max) {
                dev_err(map->dev,
                        "Range %d: selector for %d in window\n",
                        i, j);
                goto err_range;
            }

            if (!(win_max < range_cfg->range_min ||
                  win_min > range_cfg->range_max)) {
                dev_err(map->dev,
                        "Range %d: window for %d in window\n",
                        i, j);
                goto err_range;
            }
        }

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (new == NULL) {
            ret = -ENOMEM;
            goto err_range;
        }

        new->map = map;
        new->name = range_cfg->name;
        new->range_min = range_cfg->range_min;
        new->range_max = range_cfg->range_max;
        new->selector_reg = range_cfg->selector_reg;
        new->selector_mask = range_cfg->selector_mask;
        new->selector_shift = range_cfg->selector_shift;
        new->window_start = range_cfg->window_start;
        new->window_len = range_cfg->window_len;

        if (!_regmap_range_add(map, new)) {
            dev_err(map->dev, "Failed to add range %d\n", i);
            kfree(new);
            goto err_range;
        }

        /* Lazily allocate the scratch buffer used while switching
         * pages; shared by all ranges. */
        if (map->selector_work_buf == NULL) {
            map->selector_work_buf =
                kzalloc(map->format.buf_size, GFP_KERNEL);
            if (map->selector_work_buf == NULL) {
                ret = -ENOMEM;
                goto err_range;
            }
        }
    }

    ret = regcache_init(map, config);
    if (ret != 0)
        goto err_range;

    if (dev) {
        ret = regmap_attach_dev(dev, map, config);
        if (ret != 0)
            goto err_regcache;
    }

    return map;

err_regcache:
    regcache_exit(map);
err_range:
    regmap_range_exit(map);
    kfree(map->work_buf);
err_map:
    kfree(map);
err:
    return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(regmap_init);

/* devres destructor for devm_regmap_init(): tear the whole map down. */
static void devm_regmap_release(struct device *dev, void *res)
{
    regmap_exit(*(struct regmap **)res);
}

/**
 * devm_regmap_init(): Initialise managed register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.  The
 * map will be automatically freed by the device management code.
 */
struct regmap *devm_regmap_init(struct device *dev,
                                const struct regmap_bus *bus,
                                void *bus_context,
                                const struct regmap_config *config)
{
    struct regmap **ptr, *regmap;

    ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return ERR_PTR(-ENOMEM);

    regmap = regmap_init(dev, bus, bus_context, config);
    if (!IS_ERR(regmap)) {
        *ptr = regmap;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return regmap;
}
EXPORT_SYMBOL_GPL(devm_regmap_init);

/* Fill in a regmap_field from its reg_field description. */
static void regmap_field_init(struct regmap_field *rm_field,
    struct regmap *regmap, struct reg_field reg_field)
{
    int field_bits = reg_field.msb - reg_field.lsb + 1;
    rm_field->regmap = regmap;
    rm_field->reg = reg_field.reg;
    rm_field->shift = reg_field.lsb;
    /* NOTE(review): BIT(field_bits) shifts by the full type width when
     * field_bits == 32 (msb=31, lsb=0), which is undefined behaviour;
     * GENMASK(reg_field.msb, reg_field.lsb) would be safe — confirm. */
    rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
    rm_field->id_size = reg_field.id_size;
    rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field with in the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
        struct regmap *regmap, struct reg_field reg_field)
{
    struct regmap_field *rm_field = devm_kzalloc(dev,
                    sizeof(*rm_field), GFP_KERNEL);
    if (!rm_field)
        return ERR_PTR(-ENOMEM);

    regmap_field_init(rm_field, regmap, reg_field);

    return rm_field;

}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free(): Free register field allocated using
 * devm_regmap_field_alloc.  Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per device-driver
 * life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
    struct regmap_field *field)
{
    devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field with in the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once its finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
        struct reg_field reg_field)
{
    struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

    if (!rm_field)
        return ERR_PTR(-ENOMEM);

    regmap_field_init(rm_field, regmap, reg_field);

    return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
    kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
    regcache_exit(map);
    regmap_debugfs_exit(map);

    /* Pick up any access callbacks/limits the new config changed. */
    map->max_register = config->max_register;
    map->writeable_reg = config->writeable_reg;
    map->readable_reg = config->readable_reg;
    map->volatile_reg = config->volatile_reg;
    map->precious_reg = config->precious_reg;
    map->cache_type = config->cache_type;

    regmap_debugfs_init(map, config->name);

    map->cache_bypass = false;
    map->cache_only = false;

    return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
{
    struct regmap_async *async;

    regcache_exit(map);
    regmap_debugfs_exit(map);
    regmap_range_exit(map);
    if (map->bus && map->bus->free_context)
        map->bus->free_context(map->bus_context);
    kfree(map->work_buf);
    /* Drain the pool of completed async transactions. */
    while (!list_empty(&map->async_free)) {
        async = list_first_entry_or_null(&map->async_free,
                                         struct regmap_async,
                                         list);
        list_del(&async->list);
        kfree(async->work_buf);
        kfree(async);
    }
    kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

/* devres match callback: select the regmap with the requested name. */
static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
    struct regmap **r = res;
    if (!r || !*r) {
        WARN_ON(!r || !*r);
        return 0;
    }

    /* If the user didn't specify a name match any */
    if (data)
        return (*r)->name == data;
    else
        return 1;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
    struct regmap **r = devres_find(dev, dev_get_regmap_release,
                                    dev_get_regmap_match, (void *)name);

    if (!r)
        return NULL;
    return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
    return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

/*
 * Translate *reg, which lies inside virtual range @range, into its
 * data-window address, writing the page selector register first when
 * a page switch is required.  Rejects bulk accesses that would cross
 * the range or a single page.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
                               struct regmap_range_node *range,
                               unsigned int val_num)
{
    void *orig_work_buf;
    unsigned int win_offset;
    unsigned int win_page;
    bool page_chg;
    int ret;

    win_offset = (*reg - range->range_min) % range->window_len;
    win_page = (*reg - range->range_min) / range->window_len;

    if (val_num > 1) {
        /* Bulk write shouldn't cross range boundary */
        if (*reg + val_num - 1 > range->range_max)
            return -EINVAL;

        /* ... or single page boundary */
        if (val_num > range->window_len - win_offset)
            return -EINVAL;
    }

    /* It is possible to have selector register inside data window.
       In that case, selector register is located on every page and
       it needs no page switching, when accessed alone. */
    if (val_num > 1 ||
        range->window_start + win_offset != range->selector_reg) {
        /* Use separate work_buf during page switching */
        orig_work_buf = map->work_buf;
        map->work_buf = map->selector_work_buf;

        ret = _regmap_update_bits(map, range->selector_reg,
                                  range->selector_mask,
                                  win_page << range->selector_shift,
                                  &page_chg);

        map->work_buf = orig_work_buf;

        if (ret != 0)
            return ret;
    }

    *reg = range->window_start + win_offset;

    return 0;
}

/*
 * Core raw-write path: cache the values, resolve any virtual range,
 * format the register into work_buf and hand the data to the bus via
 * the async, single-buffer, or gather-write mechanisms.
 */
int _regmap_raw_write(struct regmap *map, unsigned int reg,
                      const void *val, size_t val_len)
{
    struct regmap_range_node *range;
    unsigned long flags;
    u8 *u8 = map->work_buf;   /* NOTE(review): shadows the u8 type in this scope */
    void *work_val = map->work_buf + map->format.reg_bytes +
        map->format.pad_bytes;
    void *buf;
    int ret = -ENOTSUPP;
    size_t len;
    int i;

    WARN_ON(!map->bus);

    /* Check for unwritable registers before we start */
    if (map->writeable_reg)
        for (i = 0; i < val_len / map->format.val_bytes; i++)
            if (!map->writeable_reg(map->dev,
                                    reg + (i * map->reg_stride)))
                return -EINVAL;

    if (!map->cache_bypass && map->format.parse_val) {
        unsigned int ival;
        int val_bytes = map->format.val_bytes;
        for (i = 0; i < val_len / val_bytes; i++) {
            ival = map->format.parse_val(val + (i * val_bytes));
            ret = regcache_write(map, reg + (i * map->reg_stride),
                                 ival);
            if (ret) {
                /* NOTE(review): message prints reg + i, but the
                 * register actually cached was
                 * reg + (i * map->reg_stride) — verify. */
                dev_err(map->dev,
                        "Error in caching of register: %x ret: %d\n",
                        reg + i, ret);
                return ret;
            }
        }
        if (map->cache_only) {
            map->cache_dirty = true;
            return 0;
        }
    }

    range = _regmap_range_lookup(map, reg);
    if (range) {
        int val_num = val_len / map->format.val_bytes;
        int win_offset = (reg - range->range_min) % range->window_len;
        int win_residue = range->window_len - win_offset;

        /* If the write goes beyond the end of the window split it */
        while (val_num > win_residue) {
            dev_dbg(map->dev, "Writing window %d/%zu\n",
                    win_residue, val_len / map->format.val_bytes);
            ret = _regmap_raw_write(map, reg, val, win_residue *
                                    map->format.val_bytes);
            if (ret != 0)
                return ret;

            reg += win_residue;
            val_num -= win_residue;
            val += win_residue * map->format.val_bytes;
            val_len -= win_residue * map->format.val_bytes;

            win_offset = (reg - range->range_min) %
                range->window_len;
            win_residue = range->window_len - win_offset;
        }

        ret = _regmap_select_page(map, &reg, range, val_num);
        if (ret != 0)
            return ret;
    }

    map->format.format_reg(map->work_buf, reg, map->reg_shift);

    u8[0] |= map->write_flag_mask;

    /*
     * Essentially all I/O mechanisms will be faster with a single
     * buffer to write.  Since register syncs often generate raw
     * writes of single registers optimise that case.
     */
    if (val != work_val && val_len == map->format.val_bytes) {
        memcpy(work_val, val, map->format.val_bytes);
        val = work_val;
    }

    if (map->async && map->bus->async_write) {
        struct regmap_async *async;

        trace_regmap_async_write_start(map->dev, reg, val_len);

        /* Reuse a pooled async descriptor if one is free. */
        spin_lock_irqsave(&map->async_lock, flags);
        async = list_first_entry_or_null(&map->async_free,
                                         struct regmap_async,
                                         list);
        if (async)
            list_del(&async->list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        if (!async) {
            async = map->bus->async_alloc();
            if (!async)
                return -ENOMEM;

            async->work_buf = kzalloc(map->format.buf_size,
                                      GFP_KERNEL | GFP_DMA);
            if (!async->work_buf) {
                kfree(async);
                return -ENOMEM;
            }
        }

        async->map = map;

        /* If the caller supplied the value we can use it safely. */
        memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
               map->format.reg_bytes + map->format.val_bytes);

        spin_lock_irqsave(&map->async_lock, flags);
        list_add_tail(&async->list, &map->async_list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        if (val != work_val)
            ret = map->bus->async_write(map->bus_context,
                                        async->work_buf,
                                        map->format.reg_bytes +
                                        map->format.pad_bytes,
                                        val, val_len, async);
        else
            ret = map->bus->async_write(map->bus_context,
                                        async->work_buf,
                                        map->format.reg_bytes +
                                        map->format.pad_bytes +
                                        val_len, NULL, 0, async);

        if (ret != 0) {
            dev_err(map->dev, "Failed to schedule write: %d\n",
                    ret);

            /* Return the descriptor to the free pool on failure. */
            spin_lock_irqsave(&map->async_lock, flags);
            list_move(&async->list, &map->async_free);
            spin_unlock_irqrestore(&map->async_lock, flags);
        }

        return ret;
    }

    trace_regmap_hw_write_start(map->dev, reg,
                                val_len / map->format.val_bytes);

    /* If we're doing a single register write we can probably just
     * send the work_buf directly, otherwise try to do a gather
     * write.
     */
    if (val == work_val)
        ret = map->bus->write(map->bus_context, map->work_buf,
                              map->format.reg_bytes +
                              map->format.pad_bytes +
                              val_len);
    else if (map->bus->gather_write)
        ret = map->bus->gather_write(map->bus_context, map->work_buf,
                                     map->format.reg_bytes +
                                     map->format.pad_bytes,
                                     val, val_len);

    /* If that didn't work fall back on linearising by hand.
*/ 1303 if (ret == -ENOTSUPP) { 1304 len = map->format.reg_bytes + map->format.pad_bytes + val_len; 1305 buf = kzalloc(len, GFP_KERNEL); 1306 if (!buf) 1307 return -ENOMEM; 1308 1309 memcpy(buf, map->work_buf, map->format.reg_bytes); 1310 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, 1311 val, val_len); 1312 ret = map->bus->write(map->bus_context, buf, len); 1313 1314 kfree(buf); 1315 } 1316 1317 trace_regmap_hw_write_done(map->dev, reg, 1318 val_len / map->format.val_bytes); 1319 1320 return ret; 1321 } 1322 1323 /** 1324 * regmap_can_raw_write - Test if regmap_raw_write() is supported 1325 * 1326 * @map: Map to check. 1327 */ 1328 bool regmap_can_raw_write(struct regmap *map) 1329 { 1330 return map->bus && map->format.format_val && map->format.format_reg; 1331 } 1332 EXPORT_SYMBOL_GPL(regmap_can_raw_write); 1333 1334 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 1335 unsigned int val) 1336 { 1337 int ret; 1338 struct regmap_range_node *range; 1339 struct regmap *map = context; 1340 1341 WARN_ON(!map->bus || !map->format.format_write); 1342 1343 range = _regmap_range_lookup(map, reg); 1344 if (range) { 1345 ret = _regmap_select_page(map, ®, range, 1); 1346 if (ret != 0) 1347 return ret; 1348 } 1349 1350 map->format.format_write(map, reg, val); 1351 1352 trace_regmap_hw_write_start(map->dev, reg, 1); 1353 1354 ret = map->bus->write(map->bus_context, map->work_buf, 1355 map->format.buf_size); 1356 1357 trace_regmap_hw_write_done(map->dev, reg, 1); 1358 1359 return ret; 1360 } 1361 1362 static int _regmap_bus_reg_write(void *context, unsigned int reg, 1363 unsigned int val) 1364 { 1365 struct regmap *map = context; 1366 1367 return map->bus->reg_write(map->bus_context, reg, val); 1368 } 1369 1370 static int _regmap_bus_raw_write(void *context, unsigned int reg, 1371 unsigned int val) 1372 { 1373 struct regmap *map = context; 1374 1375 WARN_ON(!map->bus || !map->format.format_val); 1376 1377 map->format.format_val(map->work_buf 
+ map->format.reg_bytes 1378 + map->format.pad_bytes, val, 0); 1379 return _regmap_raw_write(map, reg, 1380 map->work_buf + 1381 map->format.reg_bytes + 1382 map->format.pad_bytes, 1383 map->format.val_bytes); 1384 } 1385 1386 static inline void *_regmap_map_get_context(struct regmap *map) 1387 { 1388 return (map->bus) ? map : map->bus_context; 1389 } 1390 1391 int _regmap_write(struct regmap *map, unsigned int reg, 1392 unsigned int val) 1393 { 1394 int ret; 1395 void *context = _regmap_map_get_context(map); 1396 1397 if (!regmap_writeable(map, reg)) 1398 return -EIO; 1399 1400 if (!map->cache_bypass && !map->defer_caching) { 1401 ret = regcache_write(map, reg, val); 1402 if (ret != 0) 1403 return ret; 1404 if (map->cache_only) { 1405 map->cache_dirty = true; 1406 return 0; 1407 } 1408 } 1409 1410 #ifdef LOG_DEVICE 1411 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) 1412 dev_info(map->dev, "%x <= %x\n", reg, val); 1413 #endif 1414 1415 trace_regmap_reg_write(map->dev, reg, val); 1416 1417 return map->reg_write(context, reg, val); 1418 } 1419 1420 /** 1421 * regmap_write(): Write a value to a single register 1422 * 1423 * @map: Register map to write to 1424 * @reg: Register to write to 1425 * @val: Value to be written 1426 * 1427 * A value of zero will be returned on success, a negative errno will 1428 * be returned in error cases. 
1429 */ 1430 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) 1431 { 1432 int ret; 1433 1434 if (reg % map->reg_stride) 1435 return -EINVAL; 1436 1437 map->lock(map->lock_arg); 1438 1439 ret = _regmap_write(map, reg, val); 1440 1441 map->unlock(map->lock_arg); 1442 1443 return ret; 1444 } 1445 EXPORT_SYMBOL_GPL(regmap_write); 1446 1447 /** 1448 * regmap_write_async(): Write a value to a single register asynchronously 1449 * 1450 * @map: Register map to write to 1451 * @reg: Register to write to 1452 * @val: Value to be written 1453 * 1454 * A value of zero will be returned on success, a negative errno will 1455 * be returned in error cases. 1456 */ 1457 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) 1458 { 1459 int ret; 1460 1461 if (reg % map->reg_stride) 1462 return -EINVAL; 1463 1464 map->lock(map->lock_arg); 1465 1466 map->async = true; 1467 1468 ret = _regmap_write(map, reg, val); 1469 1470 map->async = false; 1471 1472 map->unlock(map->lock_arg); 1473 1474 return ret; 1475 } 1476 EXPORT_SYMBOL_GPL(regmap_write_async); 1477 1478 /** 1479 * regmap_raw_write(): Write raw values to one or more registers 1480 * 1481 * @map: Register map to write to 1482 * @reg: Initial register to write to 1483 * @val: Block of data to be written, laid out for direct transmission to the 1484 * device 1485 * @val_len: Length of data pointed to by val. 1486 * 1487 * This function is intended to be used for things like firmware 1488 * download where a large block of data needs to be transferred to the 1489 * device. No formatting will be done on the data provided. 1490 * 1491 * A value of zero will be returned on success, a negative errno will 1492 * be returned in error cases. 
1493 */ 1494 int regmap_raw_write(struct regmap *map, unsigned int reg, 1495 const void *val, size_t val_len) 1496 { 1497 int ret; 1498 1499 if (!regmap_can_raw_write(map)) 1500 return -EINVAL; 1501 if (val_len % map->format.val_bytes) 1502 return -EINVAL; 1503 1504 map->lock(map->lock_arg); 1505 1506 ret = _regmap_raw_write(map, reg, val, val_len); 1507 1508 map->unlock(map->lock_arg); 1509 1510 return ret; 1511 } 1512 EXPORT_SYMBOL_GPL(regmap_raw_write); 1513 1514 /** 1515 * regmap_field_write(): Write a value to a single register field 1516 * 1517 * @field: Register field to write to 1518 * @val: Value to be written 1519 * 1520 * A value of zero will be returned on success, a negative errno will 1521 * be returned in error cases. 1522 */ 1523 int regmap_field_write(struct regmap_field *field, unsigned int val) 1524 { 1525 return regmap_update_bits(field->regmap, field->reg, 1526 field->mask, val << field->shift); 1527 } 1528 EXPORT_SYMBOL_GPL(regmap_field_write); 1529 1530 /** 1531 * regmap_field_update_bits(): Perform a read/modify/write cycle 1532 * on the register field 1533 * 1534 * @field: Register field to write to 1535 * @mask: Bitmask to change 1536 * @val: Value to be written 1537 * 1538 * A value of zero will be returned on success, a negative errno will 1539 * be returned in error cases. 1540 */ 1541 int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val) 1542 { 1543 mask = (mask << field->shift) & field->mask; 1544 1545 return regmap_update_bits(field->regmap, field->reg, 1546 mask, val << field->shift); 1547 } 1548 EXPORT_SYMBOL_GPL(regmap_field_update_bits); 1549 1550 /** 1551 * regmap_fields_write(): Write a value to a single register field with port ID 1552 * 1553 * @field: Register field to write to 1554 * @id: port ID 1555 * @val: Value to be written 1556 * 1557 * A value of zero will be returned on success, a negative errno will 1558 * be returned in error cases. 
1559 */ 1560 int regmap_fields_write(struct regmap_field *field, unsigned int id, 1561 unsigned int val) 1562 { 1563 if (id >= field->id_size) 1564 return -EINVAL; 1565 1566 return regmap_update_bits(field->regmap, 1567 field->reg + (field->id_offset * id), 1568 field->mask, val << field->shift); 1569 } 1570 EXPORT_SYMBOL_GPL(regmap_fields_write); 1571 1572 /** 1573 * regmap_fields_update_bits(): Perform a read/modify/write cycle 1574 * on the register field 1575 * 1576 * @field: Register field to write to 1577 * @id: port ID 1578 * @mask: Bitmask to change 1579 * @val: Value to be written 1580 * 1581 * A value of zero will be returned on success, a negative errno will 1582 * be returned in error cases. 1583 */ 1584 int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, 1585 unsigned int mask, unsigned int val) 1586 { 1587 if (id >= field->id_size) 1588 return -EINVAL; 1589 1590 mask = (mask << field->shift) & field->mask; 1591 1592 return regmap_update_bits(field->regmap, 1593 field->reg + (field->id_offset * id), 1594 mask, val << field->shift); 1595 } 1596 EXPORT_SYMBOL_GPL(regmap_fields_update_bits); 1597 1598 /* 1599 * regmap_bulk_write(): Write multiple registers to the device 1600 * 1601 * @map: Register map to write to 1602 * @reg: First register to be write from 1603 * @val: Block of data to be written, in native register size for device 1604 * @val_count: Number of registers to write 1605 * 1606 * This function is intended to be used for writing a large block of 1607 * data to the device either in single transfer or multiple transfer. 1608 * 1609 * A value of zero will be returned on success, a negative errno will 1610 * be returned in error cases. 
1611 */ 1612 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 1613 size_t val_count) 1614 { 1615 int ret = 0, i; 1616 size_t val_bytes = map->format.val_bytes; 1617 1618 if (map->bus && !map->format.parse_inplace) 1619 return -EINVAL; 1620 if (reg % map->reg_stride) 1621 return -EINVAL; 1622 1623 /* 1624 * Some devices don't support bulk write, for 1625 * them we have a series of single write operations. 1626 */ 1627 if (!map->bus || map->use_single_rw) { 1628 map->lock(map->lock_arg); 1629 for (i = 0; i < val_count; i++) { 1630 unsigned int ival; 1631 1632 switch (val_bytes) { 1633 case 1: 1634 ival = *(u8 *)(val + (i * val_bytes)); 1635 break; 1636 case 2: 1637 ival = *(u16 *)(val + (i * val_bytes)); 1638 break; 1639 case 4: 1640 ival = *(u32 *)(val + (i * val_bytes)); 1641 break; 1642 #ifdef CONFIG_64BIT 1643 case 8: 1644 ival = *(u64 *)(val + (i * val_bytes)); 1645 break; 1646 #endif 1647 default: 1648 ret = -EINVAL; 1649 goto out; 1650 } 1651 1652 ret = _regmap_write(map, reg + (i * map->reg_stride), 1653 ival); 1654 if (ret != 0) 1655 goto out; 1656 } 1657 out: 1658 map->unlock(map->lock_arg); 1659 } else { 1660 void *wval; 1661 1662 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); 1663 if (!wval) { 1664 dev_err(map->dev, "Error in memory allocation\n"); 1665 return -ENOMEM; 1666 } 1667 for (i = 0; i < val_count * val_bytes; i += val_bytes) 1668 map->format.parse_inplace(wval + i); 1669 1670 map->lock(map->lock_arg); 1671 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 1672 map->unlock(map->lock_arg); 1673 1674 kfree(wval); 1675 } 1676 return ret; 1677 } 1678 EXPORT_SYMBOL_GPL(regmap_bulk_write); 1679 1680 /* 1681 * _regmap_raw_multi_reg_write() 1682 * 1683 * the (register,newvalue) pairs in regs have not been formatted, but 1684 * they are all in the same page and have been changed to being page 1685 * relative. The page register has been written if that was neccessary. 
1686 */ 1687 static int _regmap_raw_multi_reg_write(struct regmap *map, 1688 const struct reg_default *regs, 1689 size_t num_regs) 1690 { 1691 int ret; 1692 void *buf; 1693 int i; 1694 u8 *u8; 1695 size_t val_bytes = map->format.val_bytes; 1696 size_t reg_bytes = map->format.reg_bytes; 1697 size_t pad_bytes = map->format.pad_bytes; 1698 size_t pair_size = reg_bytes + pad_bytes + val_bytes; 1699 size_t len = pair_size * num_regs; 1700 1701 if (!len) 1702 return -EINVAL; 1703 1704 buf = kzalloc(len, GFP_KERNEL); 1705 if (!buf) 1706 return -ENOMEM; 1707 1708 /* We have to linearise by hand. */ 1709 1710 u8 = buf; 1711 1712 for (i = 0; i < num_regs; i++) { 1713 int reg = regs[i].reg; 1714 int val = regs[i].def; 1715 trace_regmap_hw_write_start(map->dev, reg, 1); 1716 map->format.format_reg(u8, reg, map->reg_shift); 1717 u8 += reg_bytes + pad_bytes; 1718 map->format.format_val(u8, val, 0); 1719 u8 += val_bytes; 1720 } 1721 u8 = buf; 1722 *u8 |= map->write_flag_mask; 1723 1724 ret = map->bus->write(map->bus_context, buf, len); 1725 1726 kfree(buf); 1727 1728 for (i = 0; i < num_regs; i++) { 1729 int reg = regs[i].reg; 1730 trace_regmap_hw_write_done(map->dev, reg, 1); 1731 } 1732 return ret; 1733 } 1734 1735 static unsigned int _regmap_register_page(struct regmap *map, 1736 unsigned int reg, 1737 struct regmap_range_node *range) 1738 { 1739 unsigned int win_page = (reg - range->range_min) / range->window_len; 1740 1741 return win_page; 1742 } 1743 1744 static int _regmap_range_multi_paged_reg_write(struct regmap *map, 1745 struct reg_default *regs, 1746 size_t num_regs) 1747 { 1748 int ret; 1749 int i, n; 1750 struct reg_default *base; 1751 unsigned int this_page = 0; 1752 /* 1753 * the set of registers are not neccessarily in order, but 1754 * since the order of write must be preserved this algorithm 1755 * chops the set each time the page changes 1756 */ 1757 base = regs; 1758 for (i = 0, n = 0; i < num_regs; i++, n++) { 1759 unsigned int reg = regs[i].reg; 1760 struct 
regmap_range_node *range; 1761 1762 range = _regmap_range_lookup(map, reg); 1763 if (range) { 1764 unsigned int win_page = _regmap_register_page(map, reg, 1765 range); 1766 1767 if (i == 0) 1768 this_page = win_page; 1769 if (win_page != this_page) { 1770 this_page = win_page; 1771 ret = _regmap_raw_multi_reg_write(map, base, n); 1772 if (ret != 0) 1773 return ret; 1774 base += n; 1775 n = 0; 1776 } 1777 ret = _regmap_select_page(map, &base[n].reg, range, 1); 1778 if (ret != 0) 1779 return ret; 1780 } 1781 } 1782 if (n > 0) 1783 return _regmap_raw_multi_reg_write(map, base, n); 1784 return 0; 1785 } 1786 1787 static int _regmap_multi_reg_write(struct regmap *map, 1788 const struct reg_default *regs, 1789 size_t num_regs) 1790 { 1791 int i; 1792 int ret; 1793 1794 if (!map->can_multi_write) { 1795 for (i = 0; i < num_regs; i++) { 1796 ret = _regmap_write(map, regs[i].reg, regs[i].def); 1797 if (ret != 0) 1798 return ret; 1799 } 1800 return 0; 1801 } 1802 1803 if (!map->format.parse_inplace) 1804 return -EINVAL; 1805 1806 if (map->writeable_reg) 1807 for (i = 0; i < num_regs; i++) { 1808 int reg = regs[i].reg; 1809 if (!map->writeable_reg(map->dev, reg)) 1810 return -EINVAL; 1811 if (reg % map->reg_stride) 1812 return -EINVAL; 1813 } 1814 1815 if (!map->cache_bypass) { 1816 for (i = 0; i < num_regs; i++) { 1817 unsigned int val = regs[i].def; 1818 unsigned int reg = regs[i].reg; 1819 ret = regcache_write(map, reg, val); 1820 if (ret) { 1821 dev_err(map->dev, 1822 "Error in caching of register: %x ret: %d\n", 1823 reg, ret); 1824 return ret; 1825 } 1826 } 1827 if (map->cache_only) { 1828 map->cache_dirty = true; 1829 return 0; 1830 } 1831 } 1832 1833 WARN_ON(!map->bus); 1834 1835 for (i = 0; i < num_regs; i++) { 1836 unsigned int reg = regs[i].reg; 1837 struct regmap_range_node *range; 1838 range = _regmap_range_lookup(map, reg); 1839 if (range) { 1840 size_t len = sizeof(struct reg_default)*num_regs; 1841 struct reg_default *base = kmemdup(regs, len, 1842 
GFP_KERNEL); 1843 if (!base) 1844 return -ENOMEM; 1845 ret = _regmap_range_multi_paged_reg_write(map, base, 1846 num_regs); 1847 kfree(base); 1848 1849 return ret; 1850 } 1851 } 1852 return _regmap_raw_multi_reg_write(map, regs, num_regs); 1853 } 1854 1855 /* 1856 * regmap_multi_reg_write(): Write multiple registers to the device 1857 * 1858 * where the set of register,value pairs are supplied in any order, 1859 * possibly not all in a single range. 1860 * 1861 * @map: Register map to write to 1862 * @regs: Array of structures containing register,value to be written 1863 * @num_regs: Number of registers to write 1864 * 1865 * The 'normal' block write mode will send ultimately send data on the 1866 * target bus as R,V1,V2,V3,..,Vn where successively higer registers are 1867 * addressed. However, this alternative block multi write mode will send 1868 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device 1869 * must of course support the mode. 1870 * 1871 * A value of zero will be returned on success, a negative errno will be 1872 * returned in error cases. 
1873 */ 1874 int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, 1875 int num_regs) 1876 { 1877 int ret; 1878 1879 map->lock(map->lock_arg); 1880 1881 ret = _regmap_multi_reg_write(map, regs, num_regs); 1882 1883 map->unlock(map->lock_arg); 1884 1885 return ret; 1886 } 1887 EXPORT_SYMBOL_GPL(regmap_multi_reg_write); 1888 1889 /* 1890 * regmap_multi_reg_write_bypassed(): Write multiple registers to the 1891 * device but not the cache 1892 * 1893 * where the set of register are supplied in any order 1894 * 1895 * @map: Register map to write to 1896 * @regs: Array of structures containing register,value to be written 1897 * @num_regs: Number of registers to write 1898 * 1899 * This function is intended to be used for writing a large block of data 1900 * atomically to the device in single transfer for those I2C client devices 1901 * that implement this alternative block write mode. 1902 * 1903 * A value of zero will be returned on success, a negative errno will 1904 * be returned in error cases. 1905 */ 1906 int regmap_multi_reg_write_bypassed(struct regmap *map, 1907 const struct reg_default *regs, 1908 int num_regs) 1909 { 1910 int ret; 1911 bool bypass; 1912 1913 map->lock(map->lock_arg); 1914 1915 bypass = map->cache_bypass; 1916 map->cache_bypass = true; 1917 1918 ret = _regmap_multi_reg_write(map, regs, num_regs); 1919 1920 map->cache_bypass = bypass; 1921 1922 map->unlock(map->lock_arg); 1923 1924 return ret; 1925 } 1926 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); 1927 1928 /** 1929 * regmap_raw_write_async(): Write raw values to one or more registers 1930 * asynchronously 1931 * 1932 * @map: Register map to write to 1933 * @reg: Initial register to write to 1934 * @val: Block of data to be written, laid out for direct transmission to the 1935 * device. Must be valid until regmap_async_complete() is called. 1936 * @val_len: Length of data pointed to by val. 
1937 * 1938 * This function is intended to be used for things like firmware 1939 * download where a large block of data needs to be transferred to the 1940 * device. No formatting will be done on the data provided. 1941 * 1942 * If supported by the underlying bus the write will be scheduled 1943 * asynchronously, helping maximise I/O speed on higher speed buses 1944 * like SPI. regmap_async_complete() can be called to ensure that all 1945 * asynchrnous writes have been completed. 1946 * 1947 * A value of zero will be returned on success, a negative errno will 1948 * be returned in error cases. 1949 */ 1950 int regmap_raw_write_async(struct regmap *map, unsigned int reg, 1951 const void *val, size_t val_len) 1952 { 1953 int ret; 1954 1955 if (val_len % map->format.val_bytes) 1956 return -EINVAL; 1957 if (reg % map->reg_stride) 1958 return -EINVAL; 1959 1960 map->lock(map->lock_arg); 1961 1962 map->async = true; 1963 1964 ret = _regmap_raw_write(map, reg, val, val_len); 1965 1966 map->async = false; 1967 1968 map->unlock(map->lock_arg); 1969 1970 return ret; 1971 } 1972 EXPORT_SYMBOL_GPL(regmap_raw_write_async); 1973 1974 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 1975 unsigned int val_len) 1976 { 1977 struct regmap_range_node *range; 1978 u8 *u8 = map->work_buf; 1979 int ret; 1980 1981 WARN_ON(!map->bus); 1982 1983 range = _regmap_range_lookup(map, reg); 1984 if (range) { 1985 ret = _regmap_select_page(map, ®, range, 1986 val_len / map->format.val_bytes); 1987 if (ret != 0) 1988 return ret; 1989 } 1990 1991 map->format.format_reg(map->work_buf, reg, map->reg_shift); 1992 1993 /* 1994 * Some buses or devices flag reads by setting the high bits in the 1995 * register addresss; since it's always the high bits for all 1996 * current formats we can do this here rather than in 1997 * formatting. This may break if we get interesting formats. 
1998 */ 1999 u8[0] |= map->read_flag_mask; 2000 2001 trace_regmap_hw_read_start(map->dev, reg, 2002 val_len / map->format.val_bytes); 2003 2004 ret = map->bus->read(map->bus_context, map->work_buf, 2005 map->format.reg_bytes + map->format.pad_bytes, 2006 val, val_len); 2007 2008 trace_regmap_hw_read_done(map->dev, reg, 2009 val_len / map->format.val_bytes); 2010 2011 return ret; 2012 } 2013 2014 static int _regmap_bus_reg_read(void *context, unsigned int reg, 2015 unsigned int *val) 2016 { 2017 struct regmap *map = context; 2018 2019 return map->bus->reg_read(map->bus_context, reg, val); 2020 } 2021 2022 static int _regmap_bus_read(void *context, unsigned int reg, 2023 unsigned int *val) 2024 { 2025 int ret; 2026 struct regmap *map = context; 2027 2028 if (!map->format.parse_val) 2029 return -EINVAL; 2030 2031 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); 2032 if (ret == 0) 2033 *val = map->format.parse_val(map->work_buf); 2034 2035 return ret; 2036 } 2037 2038 static int _regmap_read(struct regmap *map, unsigned int reg, 2039 unsigned int *val) 2040 { 2041 int ret; 2042 void *context = _regmap_map_get_context(map); 2043 2044 WARN_ON(!map->reg_read); 2045 2046 if (!map->cache_bypass) { 2047 ret = regcache_read(map, reg, val); 2048 if (ret == 0) 2049 return 0; 2050 } 2051 2052 if (map->cache_only) 2053 return -EBUSY; 2054 2055 if (!regmap_readable(map, reg)) 2056 return -EIO; 2057 2058 ret = map->reg_read(context, reg, val); 2059 if (ret == 0) { 2060 #ifdef LOG_DEVICE 2061 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) 2062 dev_info(map->dev, "%x => %x\n", reg, *val); 2063 #endif 2064 2065 trace_regmap_reg_read(map->dev, reg, *val); 2066 2067 if (!map->cache_bypass) 2068 regcache_write(map, reg, *val); 2069 } 2070 2071 return ret; 2072 } 2073 2074 /** 2075 * regmap_read(): Read a value from a single register 2076 * 2077 * @map: Register map to read from 2078 * @reg: Register to be read from 2079 * @val: Pointer to store read value 2080 * 
2081 * A value of zero will be returned on success, a negative errno will 2082 * be returned in error cases. 2083 */ 2084 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) 2085 { 2086 int ret; 2087 2088 if (reg % map->reg_stride) 2089 return -EINVAL; 2090 2091 map->lock(map->lock_arg); 2092 2093 ret = _regmap_read(map, reg, val); 2094 2095 map->unlock(map->lock_arg); 2096 2097 return ret; 2098 } 2099 EXPORT_SYMBOL_GPL(regmap_read); 2100 2101 /** 2102 * regmap_raw_read(): Read raw data from the device 2103 * 2104 * @map: Register map to read from 2105 * @reg: First register to be read from 2106 * @val: Pointer to store read value 2107 * @val_len: Size of data to read 2108 * 2109 * A value of zero will be returned on success, a negative errno will 2110 * be returned in error cases. 2111 */ 2112 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2113 size_t val_len) 2114 { 2115 size_t val_bytes = map->format.val_bytes; 2116 size_t val_count = val_len / val_bytes; 2117 unsigned int v; 2118 int ret, i; 2119 2120 if (!map->bus) 2121 return -EINVAL; 2122 if (val_len % map->format.val_bytes) 2123 return -EINVAL; 2124 if (reg % map->reg_stride) 2125 return -EINVAL; 2126 2127 map->lock(map->lock_arg); 2128 2129 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 2130 map->cache_type == REGCACHE_NONE) { 2131 /* Physical block read if there's no cache involved */ 2132 ret = _regmap_raw_read(map, reg, val, val_len); 2133 2134 } else { 2135 /* Otherwise go word by word for the cache; should be low 2136 * cost as we expect to hit the cache. 
2137 */ 2138 for (i = 0; i < val_count; i++) { 2139 ret = _regmap_read(map, reg + (i * map->reg_stride), 2140 &v); 2141 if (ret != 0) 2142 goto out; 2143 2144 map->format.format_val(val + (i * val_bytes), v, 0); 2145 } 2146 } 2147 2148 out: 2149 map->unlock(map->lock_arg); 2150 2151 return ret; 2152 } 2153 EXPORT_SYMBOL_GPL(regmap_raw_read); 2154 2155 /** 2156 * regmap_field_read(): Read a value to a single register field 2157 * 2158 * @field: Register field to read from 2159 * @val: Pointer to store read value 2160 * 2161 * A value of zero will be returned on success, a negative errno will 2162 * be returned in error cases. 2163 */ 2164 int regmap_field_read(struct regmap_field *field, unsigned int *val) 2165 { 2166 int ret; 2167 unsigned int reg_val; 2168 ret = regmap_read(field->regmap, field->reg, ®_val); 2169 if (ret != 0) 2170 return ret; 2171 2172 reg_val &= field->mask; 2173 reg_val >>= field->shift; 2174 *val = reg_val; 2175 2176 return ret; 2177 } 2178 EXPORT_SYMBOL_GPL(regmap_field_read); 2179 2180 /** 2181 * regmap_fields_read(): Read a value to a single register field with port ID 2182 * 2183 * @field: Register field to read from 2184 * @id: port ID 2185 * @val: Pointer to store read value 2186 * 2187 * A value of zero will be returned on success, a negative errno will 2188 * be returned in error cases. 
2189 */ 2190 int regmap_fields_read(struct regmap_field *field, unsigned int id, 2191 unsigned int *val) 2192 { 2193 int ret; 2194 unsigned int reg_val; 2195 2196 if (id >= field->id_size) 2197 return -EINVAL; 2198 2199 ret = regmap_read(field->regmap, 2200 field->reg + (field->id_offset * id), 2201 ®_val); 2202 if (ret != 0) 2203 return ret; 2204 2205 reg_val &= field->mask; 2206 reg_val >>= field->shift; 2207 *val = reg_val; 2208 2209 return ret; 2210 } 2211 EXPORT_SYMBOL_GPL(regmap_fields_read); 2212 2213 /** 2214 * regmap_bulk_read(): Read multiple registers from the device 2215 * 2216 * @map: Register map to read from 2217 * @reg: First register to be read from 2218 * @val: Pointer to store read value, in native register size for device 2219 * @val_count: Number of registers to read 2220 * 2221 * A value of zero will be returned on success, a negative errno will 2222 * be returned in error cases. 2223 */ 2224 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 2225 size_t val_count) 2226 { 2227 int ret, i; 2228 size_t val_bytes = map->format.val_bytes; 2229 bool vol = regmap_volatile_range(map, reg, val_count); 2230 2231 if (reg % map->reg_stride) 2232 return -EINVAL; 2233 2234 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { 2235 /* 2236 * Some devices does not support bulk read, for 2237 * them we have a series of single read operations. 
2238 */ 2239 if (map->use_single_rw) { 2240 for (i = 0; i < val_count; i++) { 2241 ret = regmap_raw_read(map, 2242 reg + (i * map->reg_stride), 2243 val + (i * val_bytes), 2244 val_bytes); 2245 if (ret != 0) 2246 return ret; 2247 } 2248 } else { 2249 ret = regmap_raw_read(map, reg, val, 2250 val_bytes * val_count); 2251 if (ret != 0) 2252 return ret; 2253 } 2254 2255 for (i = 0; i < val_count * val_bytes; i += val_bytes) 2256 map->format.parse_inplace(val + i); 2257 } else { 2258 for (i = 0; i < val_count; i++) { 2259 unsigned int ival; 2260 ret = regmap_read(map, reg + (i * map->reg_stride), 2261 &ival); 2262 if (ret != 0) 2263 return ret; 2264 memcpy(val + (i * val_bytes), &ival, val_bytes); 2265 } 2266 } 2267 2268 return 0; 2269 } 2270 EXPORT_SYMBOL_GPL(regmap_bulk_read); 2271 2272 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 2273 unsigned int mask, unsigned int val, 2274 bool *change) 2275 { 2276 int ret; 2277 unsigned int tmp, orig; 2278 2279 ret = _regmap_read(map, reg, &orig); 2280 if (ret != 0) 2281 return ret; 2282 2283 tmp = orig & ~mask; 2284 tmp |= val & mask; 2285 2286 if (tmp != orig) { 2287 ret = _regmap_write(map, reg, tmp); 2288 if (change) 2289 *change = true; 2290 } else { 2291 if (change) 2292 *change = false; 2293 } 2294 2295 return ret; 2296 } 2297 2298 /** 2299 * regmap_update_bits: Perform a read/modify/write cycle on the register map 2300 * 2301 * @map: Register map to update 2302 * @reg: Register to update 2303 * @mask: Bitmask to change 2304 * @val: New value for bitmask 2305 * 2306 * Returns zero for success, a negative number on error. 
2307 */ 2308 int regmap_update_bits(struct regmap *map, unsigned int reg, 2309 unsigned int mask, unsigned int val) 2310 { 2311 int ret; 2312 2313 map->lock(map->lock_arg); 2314 ret = _regmap_update_bits(map, reg, mask, val, NULL); 2315 map->unlock(map->lock_arg); 2316 2317 return ret; 2318 } 2319 EXPORT_SYMBOL_GPL(regmap_update_bits); 2320 2321 /** 2322 * regmap_update_bits_async: Perform a read/modify/write cycle on the register 2323 * map asynchronously 2324 * 2325 * @map: Register map to update 2326 * @reg: Register to update 2327 * @mask: Bitmask to change 2328 * @val: New value for bitmask 2329 * 2330 * With most buses the read must be done synchronously so this is most 2331 * useful for devices with a cache which do not need to interact with 2332 * the hardware to determine the current register value. 2333 * 2334 * Returns zero for success, a negative number on error. 2335 */ 2336 int regmap_update_bits_async(struct regmap *map, unsigned int reg, 2337 unsigned int mask, unsigned int val) 2338 { 2339 int ret; 2340 2341 map->lock(map->lock_arg); 2342 2343 map->async = true; 2344 2345 ret = _regmap_update_bits(map, reg, mask, val, NULL); 2346 2347 map->async = false; 2348 2349 map->unlock(map->lock_arg); 2350 2351 return ret; 2352 } 2353 EXPORT_SYMBOL_GPL(regmap_update_bits_async); 2354 2355 /** 2356 * regmap_update_bits_check: Perform a read/modify/write cycle on the 2357 * register map and report if updated 2358 * 2359 * @map: Register map to update 2360 * @reg: Register to update 2361 * @mask: Bitmask to change 2362 * @val: New value for bitmask 2363 * @change: Boolean indicating if a write was done 2364 * 2365 * Returns zero for success, a negative number on error. 
2366 */ 2367 int regmap_update_bits_check(struct regmap *map, unsigned int reg, 2368 unsigned int mask, unsigned int val, 2369 bool *change) 2370 { 2371 int ret; 2372 2373 map->lock(map->lock_arg); 2374 ret = _regmap_update_bits(map, reg, mask, val, change); 2375 map->unlock(map->lock_arg); 2376 return ret; 2377 } 2378 EXPORT_SYMBOL_GPL(regmap_update_bits_check); 2379 2380 /** 2381 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the 2382 * register map asynchronously and report if 2383 * updated 2384 * 2385 * @map: Register map to update 2386 * @reg: Register to update 2387 * @mask: Bitmask to change 2388 * @val: New value for bitmask 2389 * @change: Boolean indicating if a write was done 2390 * 2391 * With most buses the read must be done synchronously so this is most 2392 * useful for devices with a cache which do not need to interact with 2393 * the hardware to determine the current register value. 2394 * 2395 * Returns zero for success, a negative number on error. 
 */
int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
				   unsigned int mask, unsigned int val,
				   bool *change)
{
	int ret;

	map->lock(map->lock_arg);

	/* Queue any resulting write asynchronously. */
	map->async = true;

	ret = _regmap_update_bits(map, reg, mask, val, change);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);

/*
 * Completion callback invoked by bus drivers when an asynchronous
 * write finishes.  Moves the async descriptor back to the free list,
 * records a non-zero completion status in map->async_ret and wakes
 * any waiter once the pending list has drained.
 *
 * NOTE(review): plain spin_lock() is used here while other paths use
 * spin_lock_irqsave(); presumably bus drivers call this from a
 * context where that is safe -- confirm against the callers.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map->dev);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	/* Last outstanding operation?  Then the waiter can proceed. */
	wake = list_empty(&map->async_list);

	/* Keep a failure status for regmap_async_complete() to return. */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	/* Wake outside the lock to keep the critical section short. */
	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

/* Returns non-zero when no asynchronous operations remain in flight. */
static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
2457 */ 2458 int regmap_async_complete(struct regmap *map) 2459 { 2460 unsigned long flags; 2461 int ret; 2462 2463 /* Nothing to do with no async support */ 2464 if (!map->bus || !map->bus->async_write) 2465 return 0; 2466 2467 trace_regmap_async_complete_start(map->dev); 2468 2469 wait_event(map->async_waitq, regmap_async_is_done(map)); 2470 2471 spin_lock_irqsave(&map->async_lock, flags); 2472 ret = map->async_ret; 2473 map->async_ret = 0; 2474 spin_unlock_irqrestore(&map->async_lock, flags); 2475 2476 trace_regmap_async_complete_done(map->dev); 2477 2478 return ret; 2479 } 2480 EXPORT_SYMBOL_GPL(regmap_async_complete); 2481 2482 /** 2483 * regmap_register_patch: Register and apply register updates to be applied 2484 * on device initialistion 2485 * 2486 * @map: Register map to apply updates to. 2487 * @regs: Values to update. 2488 * @num_regs: Number of entries in regs. 2489 * 2490 * Register a set of register updates to be applied to the device 2491 * whenever the device registers are synchronised with the cache and 2492 * apply them immediately. Typically this is used to apply 2493 * corrections to be applied to the device defaults on startup, such 2494 * as the updates some vendors provide to undocumented registers. 2495 * 2496 * The caller must ensure that this function cannot be called 2497 * concurrently with either itself or regcache_sync(). 
2498 */ 2499 int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 2500 int num_regs) 2501 { 2502 struct reg_default *p; 2503 int ret; 2504 bool bypass; 2505 2506 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", 2507 num_regs)) 2508 return 0; 2509 2510 p = krealloc(map->patch, 2511 sizeof(struct reg_default) * (map->patch_regs + num_regs), 2512 GFP_KERNEL); 2513 if (p) { 2514 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); 2515 map->patch = p; 2516 map->patch_regs += num_regs; 2517 } else { 2518 return -ENOMEM; 2519 } 2520 2521 map->lock(map->lock_arg); 2522 2523 bypass = map->cache_bypass; 2524 2525 map->cache_bypass = true; 2526 map->async = true; 2527 2528 ret = _regmap_multi_reg_write(map, regs, num_regs); 2529 if (ret != 0) 2530 goto out; 2531 2532 out: 2533 map->async = false; 2534 map->cache_bypass = bypass; 2535 2536 map->unlock(map->lock_arg); 2537 2538 regmap_async_complete(map); 2539 2540 return ret; 2541 } 2542 EXPORT_SYMBOL_GPL(regmap_register_patch); 2543 2544 /* 2545 * regmap_get_val_bytes(): Report the size of a register value 2546 * 2547 * Report the size of a register value, mainly intended to for use by 2548 * generic infrastructure built on top of regmap. 2549 */ 2550 int regmap_get_val_bytes(struct regmap *map) 2551 { 2552 if (map->format.format_write) 2553 return -EINVAL; 2554 2555 return map->format.val_bytes; 2556 } 2557 EXPORT_SYMBOL_GPL(regmap_get_val_bytes); 2558 2559 int regmap_parse_val(struct regmap *map, const void *buf, 2560 unsigned int *val) 2561 { 2562 if (!map->format.parse_val) 2563 return -EINVAL; 2564 2565 *val = map->format.parse_val(buf); 2566 2567 return 0; 2568 } 2569 EXPORT_SYMBOL_GPL(regmap_parse_val); 2570 2571 static int __init regmap_initcall(void) 2572 { 2573 regmap_debugfs_initcall(); 2574 2575 return 0; 2576 } 2577 postcore_initcall(regmap_initcall); 2578