// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;

		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}
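/*
 * Illustrative sketch (not part of this file): a driver chooses between
 * the two default-provisioning paths via its struct regmap_config.  The
 * names and register values below are hypothetical.
 *
 *	static const struct reg_default foo_defaults[] = {
 *		{ .reg = 0x00, .def = 0x1234 },
 *		{ .reg = 0x04, .def = 0x0000 },
 *	};
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.reg_stride = 4,
 *		.max_register = 0x20,
 *		.cache_type = REGCACHE_RBTREE,
 *		.reg_defaults = foo_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_defaults),
 *	};
 *
 * Leaving .reg_defaults unset but setting .num_reg_defaults_raw makes
 * regcache_init() below fall back to regcache_hw_init(), which crafts
 * the defaults by reading them back from the hardware as above.
 */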
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"Number of register defaults is set without the defaults!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register && map->num_reg_defaults_raw)
		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}
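/*
 * Illustrative sketch (hypothetical, simplified): regcache_read() above
 * and regcache_write() below are regmap-internal helpers; the regmap
 * core consults them before touching the bus, roughly like:
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_read(map, reg, &val);
 *		if (ret == 0)
 *			return 0;	// served from the cache
 *	}
 *	// otherwise fall back to a physical bus read
 */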
/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
				    unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache\n", name);
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
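/*
 * Illustrative sketch (hypothetical driver code): the usual pattern on
 * resume from a state where the device lost its register contents:
 *
 *	regcache_cache_only(map, false);
 *	regcache_mark_dirty(map);
 *	ret = regcache_sync(map);
 *	if (ret != 0)
 *		dev_err(dev, "Failed to restore registers: %d\n", ret);
 */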
/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
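/*
 * Illustrative sketch (hypothetical driver code): taking the device
 * down for power saving while keeping the register map usable:
 *
 *	regcache_cache_only(map, true);
 *	// ...writes now land in the cache only...
 *	regulator_disable(supply);
 *
 * Pairs with the resume sequence sketched after regcache_sync().
 */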
/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
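/*
 * Illustrative sketch (hypothetical driver code): pushing a value
 * straight to the hardware without disturbing the cache contents:
 *
 *	regcache_cache_bypass(map, true);
 *	regmap_write(map, reg, val);
 *	regcache_cache_bypass(map, false);
 */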
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}
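/*
 * Illustrative sketch (hypothetical): regcache_sync_block() is the
 * helper that cache backends (e.g. regcache-rbtree) call from their
 * ->sync() callback, once per contiguous block of cached registers.
 * The variable names below are made up for the example:
 *
 *	ret = regcache_sync_block(map, blk_data, blk_present,
 *				  blk_base_reg, start_idx, end_idx);
 *
 * The raw path batches adjacent non-default registers into a single
 * bus write; the single path falls back to one write per register.
 */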