// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}
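
/*
 * Note that snprintf(NULL, 0, ...) produces no output and just returns
 * the length the rendered string would have, so for example a max_val
 * of 0x3fff yields 4. Every register address in the dump files below
 * is then printed with a fixed "%.4x"-style width.
 */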

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache, build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry? Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
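
/*
 * A concrete example of the offset cache built above: with reg_stride 1,
 * debugfs_tot_len 10 and max_register 7, where registers 4 and 5 are not
 * printable, two entries result:
 *
 *	{ base_reg = 0, max_reg = 3, min =  0, max = 39 }
 *	{ base_reg = 6, max_reg = 7, min = 40, max = 59 }
 *
 * A read() at file offset 45 then falls inside the second block:
 * reg_offset = (45 - 40) / 10 = 0, so the dump starts at register 6 and
 * *pos is snapped back to 40, the offset of that register's line.
 */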

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}
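
/*
 * For example, a map with max_register 0xfff and 2-byte values gets
 * debugfs_reg_len = 3, debugfs_val_len = 4 and debugfs_tot_len = 10,
 * so every line of the `registers' file is exactly
 *
 *	abc: 01ff\n
 *
 * (3 address digits, ": ", 4 value digits, newline). This fixed width
 * is what makes the offset arithmetic in the dump cache possible.
 */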

static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}

static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially when we have clients such as
 * PMICs, so we don't provide any real compile time configuration
 * option for this feature; people who want to use it need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
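
/*
 * When REGMAP_ALLOW_WRITE_DEBUGFS is defined, the `registers' file also
 * accepts writes of the form "<reg> <value>", both in hex, e.g.
 *
 *	echo '4 1f' > registers
 *
 * writes 0x1f to register 0x4. The kernel is tainted on every such
 * write, since userspace is changing hardware state behind the
 * driver's back.
 */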

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache. We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks.
	 */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/*
	 * Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file.
	 */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);
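
/*
 * Each line of the `access' file thus looks like
 *
 *	3f: y y n n
 *
 * where the four flags report readable, writeable, volatile and
 * precious status for that register, in that order.
 */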

static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	ssize_t result;
	bool was_enabled, require_sync = false;
	int err;

	map->lock(map->lock_arg);

	was_enabled = map->cache_only;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0) {
		map->unlock(map->lock_arg);
		return result;
	}

	if (map->cache_only && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_only && was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}

	map->unlock(map->lock_arg);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return result;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	ssize_t result;
	bool was_enabled;

	map->lock(map->lock_arg);

	was_enabled = map->cache_bypass;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0)
		goto out;

	if (map->cache_bypass && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_bypass && was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}

out:
	map->unlock(map->lock_arg);

	return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case. For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);

		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}
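
/*
 * For reference, the debugfs tree this file maintains, relative to the
 * debugfs mount point (normally /sys/kernel/debug):
 *
 *	regmap/<dev>[-<name>]/name		owning driver's name
 *	regmap/<dev>[-<name>]/range		printable register blocks
 *	regmap/<dev>[-<name>]/registers		hex register dump
 *	regmap/<dev>[-<name>]/access		r/w/volatile/precious flags
 *	regmap/<dev>[-<name>]/cache_only	force cache-only mode
 *	regmap/<dev>[-<name>]/cache_dirty	cache dirty flag (read only)
 *	regmap/<dev>[-<name>]/cache_bypass	bypass the cache entirely
 *
 * plus one dump file per named register range. The cache_* files only
 * exist for maps with a register cache.
 */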