// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
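			/*
			 * If the allocation below fails we drop the
			 * whole partially built cache and return the
			 * base register; the cache will be rebuilt on
			 * the next read.
			 */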
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}

static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}

static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
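			/*
			 * Advance by the fixed value width whether or
			 * not the read succeeded, so every record stays
			 * the same size and file offsets keep mapping
			 * cleanly onto registers.
			 */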
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially when we have clients such as
 * PMICs, so no real compile time configuration option is provided
 * for this feature; people who want to use it will need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

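/*
 * Dump the blocks of contiguous printable registers as "base-max"
 * ranges, one per line, reusing the offset cache built for the
 * `registers' file.
 */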
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

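/*
 * Report each register's capabilities as four y/n columns:
 * readable, writeable, volatile and precious.
 */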
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);

static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

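/*
 * Create the debugfs directory and entries for a map.  Maps that are
 * registered before the debugfs root exists are queued on an early
 * list and picked up later by regmap_debugfs_initcall().
 */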
void regmap_debugfs_init(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";
	const char *name = map->name;

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case.  For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
						      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		if (!map->debugfs_name)
			return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
		map->debugfs_name = NULL;
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

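/*
 * Called once debugfs is available: create the "regmap" root directory,
 * then register any maps that were queued before it existed.
 */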
void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}