/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (!regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;

		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = 1;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

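/*
 * Illustrative sketch (not part of this file's API; all "foo" names are
 * hypothetical): a device without a shipped default table can still use
 * a cache.  With only num_reg_defaults_raw set and no reg_defaults_raw,
 * regcache_hw_init() above reads the defaults back from the hardware
 * once at init time:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = FOO_MAX_REGISTER,
 *		.cache_type = REGCACHE_RBTREE,
 *		.num_reg_defaults_raw = FOO_MAX_REGISTER + 1,
 *	};
 */
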
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

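/*
 * Illustrative sketch of the expected caller pattern (not verbatim core
 * code): a read path is expected to try the cache first and fall back
 * to the bus when regcache_read() refuses, which it does for volatile
 * registers:
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_read(map, reg, &val);
 *		if (ret == 0)
 *			return 0;  // value served from the cache
 *	}
 *	// cache miss or volatile register: fall through to the bus read
 */
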
static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
				    unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

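/*
 * Typical usage sketch (hypothetical "foo" driver, error handling
 * trimmed): on suspend the device loses power, so writes are redirected
 * into the cache; on resume the cache is replayed.  Since the part
 * comes back with its reset defaults, regcache_mark_dirty() lets the
 * sync skip registers that still hold their default value.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(foo->regmap, true);
 *		regcache_mark_dirty(foo->regmap);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(foo->regmap, false);
 *		return regcache_sync(foo->regmap);
 *	}
 */
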
/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

/**
 * regcache_drop_region: Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

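/*
 * Illustrative sketch (hypothetical "foo" names): if the device clobbers
 * a register range behind the driver's back, for instance while firmware
 * is streamed through a data window, the stale cache entries for that
 * window can be discarded rather than synced back later:
 *
 *	ret = regcache_drop_region(foo->regmap, FOO_DSP_WINDOW_START,
 *				   FOO_DSP_WINDOW_END);
 *	if (ret)
 *		dev_err(foo->dev, "Failed to drop cache: %d\n", ret);
 *
 * Note this only works with cache backends that implement ->drop.
 */
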
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

/**
 * regcache_mark_dirty: Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache.  This is useful when syncing the cache back to the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

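/*
 * Illustrative sketch (hypothetical "foo" names): cache bypass suits
 * one-off writes that must reach the hardware but should not be
 * recorded in the cache, e.g. poking a self-clearing trigger during a
 * calibration sequence:
 *
 *	regcache_cache_bypass(foo->regmap, true);
 *	regmap_write(foo->regmap, FOO_CALIB_CTRL, FOO_CALIB_START);
 *	regcache_cache_bypass(foo->regmap, false);
 */
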
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}
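
/*
 * Illustrative sketch (hypothetical, simplified): a cache backend that
 * keeps all registers in one contiguous block could implement its
 * ->sync callback in terms of regcache_sync_block(), letting the helper
 * above choose between raw block writes and register-at-a-time writes.
 * start/end are block indices and end is exclusive, hence the + 1:
 *
 *	static int foo_cache_sync(struct regmap *map, unsigned int min,
 *				  unsigned int max)
 *	{
 *		struct foo_cache *cache = map->cache;
 *
 *		return regcache_sync_block(map, cache->values, NULL, 0,
 *					   min / map->reg_stride,
 *					   max / map->reg_stride + 1);
 *	}
 */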